feat: reorganize supabase config - flat db init structure, add edge functions, mcp, kong api config

This commit is contained in:
kcar 2026-02-22 00:31:50 +00:00
parent 95af17c02d
commit 31ecf136f7
54 changed files with 4383 additions and 1106 deletions

View File

@ -1,139 +0,0 @@
## App Information
APP_NAME=ClassroomCopilot
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=localhost
APP_PROTOCOL=http
# Super Admin user
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=http://localhost,http://127.0.0.1
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
# SMTP relay configuration.
# FIX: removed duplicate SMTP_USER / SMTP_PASS / SMTP_ADMIN_EMAIL /
# SMTP_SENDER_NAME keys — the fake_* placeholder values were silently
# overridden by these later definitions, which is confusing and error-prone.
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
# SECURITY: a live-looking SMTP password is committed here; rotate it and
# inject it from the deployment environment or a secrets manager instead.
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase OAuth Providers
# Azure Auth
AZURE_ENABLED=false
# SECURITY NOTE(review): a live-looking Azure client secret and tenant id are
# committed below; rotate the secret and load it from the environment or a
# secrets manager rather than from the repository.
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## Supabase Analytics Configuration (Google Cloud)
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

View File

@ -0,0 +1,131 @@
BEGIN;
-- Create pg_net extension
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
-- Create supabase_functions schema
CREATE SCHEMA IF NOT EXISTS supabase_functions AUTHORIZATION postgres;
-- Grant basic permissions
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition
CREATE TABLE IF NOT EXISTS supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial') ON CONFLICT DO NOTHING;
-- supabase_functions.hooks definition
CREATE TABLE IF NOT EXISTS supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
-- Create indexes if they don't exist
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
-- Create the http_request function
-- Trigger function used by Supabase database webhooks: enqueues an async
-- HTTP request via pg_net and records it in supabase_functions.hooks.
-- Trigger arguments (TG_ARGV, all passed as text):
--   [0] url      - required; target endpoint
--   [1] method   - required; 'GET' or 'POST'
--   [2] headers  - jsonb; defaults to {"Content-Type": "application/json"}
--   [3] params   - jsonb; defaults to {}
--   [4] timeout  - integer milliseconds; defaults to 1000
CREATE OR REPLACE FUNCTION supabase_functions.http_request()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
DECLARE
request_id bigint;
payload jsonb;
url text := TG_ARGV[0]::text;
method text := TG_ARGV[1]::text;
headers jsonb DEFAULT '{}'::jsonb;
params jsonb DEFAULT '{}'::jsonb;
timeout_ms integer DEFAULT 1000;
BEGIN
-- Missing trigger arguments surface as the literal string 'null',
-- so both NULL and 'null' are treated as "not provided".
IF url IS NULL OR url = 'null' THEN
RAISE EXCEPTION 'url argument is missing';
END IF;
IF method IS NULL OR method = 'null' THEN
RAISE EXCEPTION 'method argument is missing';
END IF;
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
headers = '{"Content-Type": "application/json"}'::jsonb;
ELSE
headers = TG_ARGV[2]::jsonb;
END IF;
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
params = '{}'::jsonb;
ELSE
params = TG_ARGV[3]::jsonb;
END IF;
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
timeout_ms = 1000;
ELSE
timeout_ms = TG_ARGV[4]::integer;
END IF;
-- net.http_get / net.http_post enqueue the request and return its id;
-- the call itself is performed asynchronously by the pg_net worker.
CASE
WHEN method = 'GET' THEN
SELECT http_get INTO request_id FROM net.http_get(
url,
params,
headers,
timeout_ms
);
WHEN method = 'POST' THEN
-- POST bodies carry the standard webhook envelope describing the
-- triggering row change (OLD/NEW records, operation, table, schema).
payload = jsonb_build_object(
'old_record', OLD,
'record', NEW,
'type', TG_OP,
'table', TG_TABLE_NAME,
'schema', TG_TABLE_SCHEMA
);
SELECT http_post INTO request_id FROM net.http_post(
url,
payload,
params,
headers,
timeout_ms
);
ELSE
RAISE EXCEPTION 'method argument % is invalid', method;
END CASE;
-- Audit trail: one hooks row per enqueued request.
INSERT INTO supabase_functions.hooks
(hook_table_id, hook_name, request_id)
VALUES
(TG_RELID, TG_NAME, request_id);
RETURN NEW;
END
$function$;
-- Set function properties
-- SECURITY DEFINER + pinned search_path so the function runs with its
-- owner's rights without being hijackable via a caller-controlled path.
ALTER FUNCTION supabase_functions.http_request() SECURITY DEFINER;
ALTER FUNCTION supabase_functions.http_request() SET search_path = supabase_functions;
-- Grant execute permissions
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
-- Grant pg_net permissions
GRANT USAGE ON SCHEMA net TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
-- Add migration record
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants') ON CONFLICT DO NOTHING;
COMMIT;

View File

@ -0,0 +1,6 @@
-- Set JWT configuration for the database
-- These settings will be configured through environment variables in the Supabase setup
-- Note: JWT configuration is handled by Supabase's internal configuration
-- This file is kept for reference but the actual JWT settings are managed
-- through the Supabase configuration and environment variables

View File

@ -0,0 +1,11 @@
-- NOTE: change to your own passwords for production environments
-- Password configuration is handled by Supabase's internal setup
-- This file is kept for reference but the actual password settings are managed
-- through the Supabase configuration and environment variables
-- The following users are created and configured by Supabase automatically:
-- - authenticator
-- - pgbouncer
-- - supabase_auth_admin
-- - supabase_functions_admin
-- - supabase_storage_admin

View File

@ -0,0 +1,364 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Core schema setup for ClassConcepts with neoFS filesystem integration
-- Dependencies: auth.users (Supabase Auth)
--[ Validation ]--
do $$
begin
-- Verify required extensions
if not exists (select 1 from pg_extension where extname = 'uuid-ossp') then
raise exception 'Required extension uuid-ossp is not installed';
end if;
-- Verify auth schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'auth') then
raise exception 'Required auth schema is not available';
end if;
-- Verify storage schema exists
if not exists (select 1 from information_schema.schemata where schema_name = 'storage') then
raise exception 'Required storage schema is not available';
end if;
end $$;
--[ 1. Extensions ]--
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- Create exec_sql function for admin operations.
-- Executes an arbitrary SQL string with the function owner's privileges
-- (SECURITY DEFINER), so access must be tightly restricted: any role that
-- can call this can run any statement as the owner.
create or replace function exec_sql(query text)
returns void as $$
begin
execute query;
end;
$$ language plpgsql security definer;
-- SECURITY FIX: by default EXECUTE on a new function is granted to PUBLIC;
-- lock it down so only the backend service role may call it.
revoke all on function exec_sql(text) from public;
grant execute on function exec_sql(text) to service_role;
-- Create updated_at trigger function
create or replace function public.handle_updated_at()
returns trigger as $$
begin
new.updated_at = timezone('utc'::text, now());
return new;
end;
$$ language plpgsql security definer;
-- Create completed_at trigger function for document artefacts
create or replace function public.set_completed_at()
returns trigger as $$
begin
if NEW.status = 'completed' and OLD.status != 'completed' then
NEW.completed_at = now();
end if;
return NEW;
end;
$$ language plpgsql security definer;
--[ 5. Core Tables ]--
-- Base user profiles
create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade,
email text not null unique,
user_type text not null check (
user_type in (
'teacher',
'student',
'email_teacher',
'email_student',
'developer',
'superadmin'
)
),
username text not null unique,
full_name text,
display_name text,
metadata jsonb default '{}'::jsonb,
user_db_name text,
school_db_name text,
neo4j_sync_status text default 'pending' check (neo4j_sync_status in ('pending', 'ready', 'failed')),
neo4j_synced_at timestamp with time zone,
last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
comment on column public.profiles.user_type is 'Type of user: teacher or student';
-- Active institutes
create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(),
name text not null,
urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb,
website text,
metadata jsonb default '{}'::jsonb,
geo_coordinates jsonb default '{}'::jsonb,
neo4j_uuid_string text,
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
neo4j_private_sync_at timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
comment on column public.institutes.geo_coordinates is 'Geospatial coordinates from OSM search (latitude, longitude, boundingbox)';
--[ 6. neoFS Filesystem Tables ]--
-- File cabinets for organizing files
create table if not exists public.file_cabinets (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.file_cabinets is 'User file cabinets for organizing documents and files';
-- Files stored in cabinets
create table if not exists public.files (
id uuid primary key default uuid_generate_v4(),
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
name text not null,
path text not null,
bucket text default 'file-cabinets' not null,
created_at timestamp with time zone default timezone('utc'::text, now()),
mime_type text,
metadata jsonb default '{}'::jsonb,
size text,
category text generated always as (
case
when mime_type like 'image/%' then 'image'
when mime_type = 'application/pdf' then 'document'
when mime_type in ('application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document') then 'document'
when mime_type in ('application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') then 'spreadsheet'
when mime_type in ('application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation') then 'presentation'
when mime_type like 'audio/%' then 'audio'
when mime_type like 'video/%' then 'video'
else 'other'
end
) stored
);
comment on table public.files is 'Files stored in user cabinets with automatic categorization';
comment on column public.files.category is 'Automatically determined file category based on MIME type';
-- AI brains for processing files
create table if not exists public.brains (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
purpose text,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.brains is 'AI brains for processing and analyzing user files';
-- Brain-file associations
create table if not exists public.brain_files (
brain_id uuid not null references public.brains(id) on delete cascade,
file_id uuid not null references public.files(id) on delete cascade,
primary key (brain_id, file_id)
);
comment on table public.brain_files is 'Associations between AI brains and files for processing';
-- Document artefacts from file processing
create table if not exists public.document_artefacts (
id uuid primary key default uuid_generate_v4(),
file_id uuid references public.files(id) on delete cascade,
page_number integer default 0 not null,
type text not null,
rel_path text not null,
size_tag text,
language text,
chunk_index integer,
extra jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
status text default 'completed' not null check (status in ('pending', 'processing', 'completed', 'failed')),
started_at timestamp with time zone default timezone('utc'::text, now()),
completed_at timestamp with time zone,
error_message text
);
comment on table public.document_artefacts is 'Extracted artefacts from document processing';
comment on column public.document_artefacts.status is 'Extraction status: pending, processing, completed, or failed';
comment on column public.document_artefacts.started_at is 'Timestamp when extraction process started';
comment on column public.document_artefacts.completed_at is 'Timestamp when extraction process completed (success or failure)';
comment on column public.document_artefacts.error_message is 'Error details if extraction failed';
-- Function execution logs
create table if not exists public.function_logs (
id serial primary key,
file_id uuid references public.files(id) on delete cascade,
timestamp timestamp with time zone default timezone('utc'::text, now()),
step text,
message text,
data jsonb
);
comment on table public.function_logs is 'Logs of function executions and processing steps';
--[ 7. Relationship Tables ]--
-- Institute memberships
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 8. Audit Tables ]--
-- System audit logs
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 9. Exam Specifications ]--
create table if not exists public.eb_specifications (
id uuid primary key default uuid_generate_v4(),
spec_code text unique,
exam_board_code text,
award_code text,
subject_code text,
first_teach text,
spec_ver text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_specifications is 'Exam board specifications and their primary document';
comment on column public.eb_specifications.spec_code is 'Unique code for the specification, used for linking exams';
comment on column public.eb_specifications.doc_details is 'Tika extract of the specification document';
comment on column public.eb_specifications.docling_docs is 'Docling extracts settings and storage locations for the specification document';
--[ 10. Exam Papers / Entries ]--
create table if not exists public.eb_exams (
id uuid primary key default uuid_generate_v4(),
exam_code text unique,
spec_code text references public.eb_specifications(spec_code) on delete cascade,
paper_code text,
tier text,
session text,
type_code text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_exams is 'Exam papers and related documents linked to specifications';
comment on column public.eb_exams.exam_code is 'Unique code for the exam paper, used for linking questions';
comment on column public.eb_exams.type_code is 'Type code for the exam document: Question Paper (QP), Mark Scheme (MS), Examiner Report (ER), Other (OT)';
comment on column public.eb_exams.doc_details is 'Tika extract of the exam paper document';
comment on column public.eb_exams.docling_docs is 'Docling extracts settings and storage locations for the exam paper document';
--[ 11. Indexes ]--
-- Index for geospatial queries
create index if not exists idx_institutes_geo_coordinates on public.institutes using gin(geo_coordinates);
create index if not exists idx_institutes_urn on public.institutes(urn);
-- Document artefacts indexes
create index if not exists idx_document_artefacts_file_status on public.document_artefacts(file_id, status);
create index if not exists idx_document_artefacts_file_type on public.document_artefacts(file_id, type);
create index if not exists idx_document_artefacts_status on public.document_artefacts(status);
-- File indexes
create index if not exists idx_files_cabinet_id on public.files(cabinet_id);
create index if not exists idx_files_mime_type on public.files(mime_type);
create index if not exists idx_files_category on public.files(category);
-- Brain indexes
create index if not exists idx_brains_user_id on public.brains(user_id);
-- Exam board indexes
create index if not exists idx_eb_exams_exam_code on public.eb_exams(exam_code);
create index if not exists idx_eb_exams_spec_code on public.eb_exams(spec_code);
create index if not exists idx_eb_exams_paper_code on public.eb_exams(paper_code);
create index if not exists idx_eb_exams_tier on public.eb_exams(tier);
create index if not exists idx_eb_exams_session on public.eb_exams(session);
create index if not exists idx_eb_exams_type_code on public.eb_exams(type_code);
create index if not exists idx_eb_specifications_spec_code on public.eb_specifications(spec_code);
create index if not exists idx_eb_specifications_exam_board_code on public.eb_specifications(exam_board_code);
create index if not exists idx_eb_specifications_award_code on public.eb_specifications(award_code);
create index if not exists idx_eb_specifications_subject_code on public.eb_specifications(subject_code);
--[ 12. Triggers ]--
-- Set completed_at when document artefact status changes to completed
create trigger trigger_set_completed_at
before update on public.document_artefacts
for each row
execute function public.set_completed_at();
-- Set updated_at on profile updates
create trigger trigger_profiles_updated_at
before update on public.profiles
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute updates
create trigger trigger_institutes_updated_at
before update on public.institutes
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_memberships updates
create trigger trigger_institute_memberships_updated_at
before update on public.institute_memberships
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_membership_requests updates
-- BUG FIX: this trigger was previously attached to public.institute_memberships
-- (duplicating trigger_institute_memberships_updated_at above) instead of the
-- institute_membership_requests table its name and comment describe.
create trigger trigger_institute_membership_requests_updated_at
before update on public.institute_membership_requests
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_specifications updates
create trigger trigger_eb_specifications_updated_at
before update on public.eb_specifications
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_exams updates
create trigger trigger_eb_exams_updated_at
before update on public.eb_exams
for each row
execute function public.handle_updated_at();

View File

@ -0,0 +1,191 @@
--[ 8. Auth Functions ]--
-- BUG FIX: both checks previously tested user_type = 'admin', but the
-- public.profiles check constraint only allows teacher / student /
-- email_teacher / email_student / developer / superadmin — so 'admin' can
-- never be stored and both functions always returned false.
-- Create a secure function to check admin status
-- TODO(review): confirm whether 'developer' should count as admin; it is
-- included here as the only other elevated user_type the schema permits.
create or replace function public.is_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type in ('developer', 'superadmin')),
false
);
$$ language sql security definer;
-- Create a secure function to check super admin status
create or replace function public.is_super_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type = 'superadmin'),
false
);
$$ language sql security definer;
-- Create public wrapper functions
-- Note: These are now the main implementation functions, not wrappers
-- The original auth schema functions have been moved to public schema
-- Grant execute permissions
grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated;
-- Initial admin setup function
-- Promotes the profile matching admin_email to the super-admin user type and
-- returns the updated profile fields as JSON. Raises if the caller is not
-- the service role / a superuser, or if no matching profile exists.
create or replace function public.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
declare
result json;
begin
-- Only allow this to run as service role or superuser
if not (
current_user = 'service_role'
or exists (
select 1 from pg_roles
where rolname = current_user
and rolsuper
)
) then
raise exception 'Must be run as service_role or superuser';
end if;
-- BUG FIX: previously set user_type = 'admin', a value rejected by the
-- public.profiles user_type check constraint, so this UPDATE always failed.
-- 'superadmin' is the constraint-permitted super-admin type.
update public.profiles
set user_type = 'superadmin',
username = coalesce(username, 'superadmin'),
display_name = coalesce(display_name, 'Super Admin')
where email = admin_email
returning json_build_object(
'id', id,
'email', email,
'user_type', user_type,
'username', username,
'display_name', display_name
) into result;
if result is null then
raise exception 'Admin user with email % not found', admin_email;
end if;
return result;
end;
$$;
-- Grant execute permissions
revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role;
-- Create RPC wrapper for REST API access
-- Thin pass-through exposing public.setup_initial_admin via the rpc schema;
-- a plain SQL function suffices since there is no procedural logic here.
create or replace function rpc.setup_initial_admin(admin_email text)
returns json
language sql
security definer
as $$
select public.setup_initial_admin(admin_email);
$$;
-- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role;
--[ 9. Utility Functions ]--
-- Check if database is ready
-- Returns true only when all essential schemas exist, auth.users is present,
-- and row level security is enabled on public.profiles.
create or replace function check_db_ready()
returns boolean
language plpgsql
security definer
as $$
begin
-- Check if essential schemas exist
-- BUG FIX: the previous `not exists (... schema_name in (...))` test passed
-- as soon as ANY ONE of the three schemas existed; readiness requires all
-- three, so count them instead.
if (
select count(*)
from information_schema.schemata
where schema_name in ('auth', 'storage', 'public')
) < 3 then
return false;
end if;
-- Check if essential tables exist
if not exists (
select 1
from information_schema.tables
where table_schema = 'auth'
and table_name = 'users'
) then
return false;
end if;
-- Check if RLS is enabled on public.profiles
if not exists (
select 1
from pg_tables
where schemaname = 'public'
and tablename = 'profiles'
and rowsecurity = true
) then
return false;
end if;
return true;
end;
$$;
-- Grant execute permission
grant execute on function check_db_ready to anon, authenticated, service_role;
-- Function to handle new user registration
-- Fired by the on_auth_user_created trigger: mirrors a freshly inserted
-- auth.users row into public.profiles, deriving defaults from the signup
-- metadata when fields are absent.
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer set search_path = public
as $$
declare
-- New signups default to the least-privileged email_student type.
default_user_type text := 'email_student';
default_username text;
begin
-- Generate username from email
-- e.g. 'jane@school.org' -> 'jane'
default_username := split_part(new.email, '@', 1);
-- NOTE(review): raw_user_meta_data values are caller-supplied; a user_type
-- outside the profiles check constraint, or a colliding username/email,
-- will make this insert — and therefore the whole auth signup — fail.
-- Confirm whether that is the intended behavior.
insert into public.profiles (
id,
email,
user_type,
username,
display_name
)
values (
new.id,
new.email,
coalesce(new.raw_user_meta_data->>'user_type', default_user_type),
coalesce(new.raw_user_meta_data->>'username', default_username),
coalesce(new.raw_user_meta_data->>'display_name', default_username)
);
return new;
end;
$$;
-- Trigger for new user creation
drop trigger if exists on_auth_user_created on auth.users;
create trigger on_auth_user_created
after insert on auth.users
for each row execute procedure public.handle_new_user();
--[ 11. Database Triggers ]--
-- updated_at maintenance triggers.
-- FIX: the schema init file already attaches trigger_*_updated_at triggers
-- running handle_updated_at() to these same tables; drop those duplicates
-- here (including the one mis-attached to institute_memberships) so each
-- UPDATE fires the function exactly once.
drop trigger if exists trigger_profiles_updated_at on public.profiles;
drop trigger if exists handle_profiles_updated_at on public.profiles;
create trigger handle_profiles_updated_at
before update on public.profiles
for each row execute function public.handle_updated_at();
drop trigger if exists trigger_institute_memberships_updated_at on public.institute_memberships;
drop trigger if exists trigger_institute_membership_requests_updated_at on public.institute_memberships;
drop trigger if exists handle_institute_memberships_updated_at on public.institute_memberships;
create trigger handle_institute_memberships_updated_at
before update on public.institute_memberships
for each row execute function public.handle_updated_at();
drop trigger if exists trigger_institute_membership_requests_updated_at on public.institute_membership_requests;
drop trigger if exists handle_membership_requests_updated_at on public.institute_membership_requests;
create trigger handle_membership_requests_updated_at
before update on public.institute_membership_requests
for each row execute function public.handle_updated_at();

View File

@ -0,0 +1,20 @@
-- Storage policies configuration for Supabase
-- Note: Storage bucket policies are managed by Supabase internally
-- This file provides guidance on what should be configured
-- Storage bucket policies should be configured through:
-- 1. Supabase Dashboard > Storage > Policies
-- 2. Or via SQL with proper permissions (requires service_role or owner access)
-- Recommended policies for storage.buckets:
-- - Super admin has full access to buckets
-- - Users can create their own buckets
-- - Users can view their own buckets or public buckets
-- Recommended policies for storage.objects:
-- - Users can upload to buckets they own
-- - Users can view objects in public buckets
-- - Users can manage objects in buckets they own
-- Note: These policies require the service_role or appropriate permissions
-- to be applied to the storage schema tables

View File

@ -0,0 +1,20 @@
-- Initial admin setup for ClassroomCopilot
-- This file handles basic database setup and permissions
-- Ensure uuid-ossp extension is enabled
create extension if not exists "uuid-ossp" schema extensions;
-- Grant basic permissions to authenticated users for public schema
-- Note: These permissions are granted to allow users to work with the application
-- WARNING: blanket GRANT ALL to authenticated relies entirely on row level
-- security being enabled on every table; prefer narrower grants in production.
grant usage on schema public to authenticated;
grant all on all tables in schema public to authenticated;
grant all on all sequences in schema public to authenticated;
grant all on all functions in schema public to authenticated;
-- Set default privileges for future objects
alter default privileges in schema public grant all on tables to authenticated;
alter default privileges in schema public grant all on sequences to authenticated;
alter default privileges in schema public grant all on functions to authenticated;
-- SECURITY FIX: the blanket function grant above includes public.exec_sql,
-- a SECURITY DEFINER function that executes arbitrary SQL. Revoke it from
-- end-user roles; only the backend service role should be able to call it.
revoke execute on function public.exec_sql(text) from anon, authenticated;
-- Note: The setup_initial_admin function is defined in 62-functions-triggers.sql
-- and should be called with an admin email parameter when needed

View File

@ -0,0 +1,95 @@
-- Files table augments and storage GC hooks
-- 1) Add columns to files if missing.
-- Uses the built-in ADD COLUMN IF NOT EXISTS guard instead of hand-rolled
-- information_schema probes inside a DO block: equivalent behavior, less code.
alter table public.files
  add column if not exists uploaded_by uuid references public.profiles(id);
alter table public.files
  add column if not exists size_bytes bigint;
alter table public.files
  add column if not exists source text default 'uploader-web';
-- 2) Unique index for cabinet/path combo (one path per cabinet)
create unique index if not exists uq_files_cabinet_path on public.files(cabinet_id, path);
-- 3) Storage GC helpers (ported from neoFS with storage schema)
-- Deletes the object at p_path and everything under the p_path/ directory
-- from the given Storage bucket. SECURITY DEFINER so triggers can delete
-- from storage.objects regardless of the caller's RLS; search_path is pinned.
create or replace function public._delete_storage_objects(p_bucket text, p_path text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
  v_like text;
begin
  if p_bucket is null or p_path is null then
    return;
  end if;
  -- Fix: escape LIKE wildcards before building the pattern. File paths very
  -- commonly contain '_' (and may contain '%'); unescaped, those characters
  -- act as wildcards and can match -- and delete -- unrelated objects.
  v_like := replace(replace(replace(p_path, '\', '\\'), '%', '\%'), '_', '\_');
  delete from storage.objects where bucket_id = p_bucket and name = p_path;
  delete from storage.objects where bucket_id = p_bucket and name like v_like || '/%';
end
$$;
-- Trigger function: when a files row is deleted, or its bucket/path changes,
-- remove the corresponding object(s) from Supabase Storage.
-- SECURITY DEFINER so it can delete from storage.objects regardless of the
-- caller's RLS; search_path pinned to avoid search-path hijacking.
-- NOTE(review): a later init script re-runs CREATE OR REPLACE on this same
-- function with a prefix-deleting variant; whichever script runs last defines
-- the final behavior -- confirm the intended ordering.
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
if tg_op = 'DELETE' then
perform public._delete_storage_objects(old.bucket, old.path);
elsif tg_op = 'UPDATE' then
-- Only GC when the object's location actually changed
if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
perform public._delete_storage_objects(old.bucket, old.path);
end if;
end if;
-- AFTER trigger: the return value is ignored
return null;
end
$$;
-- 4) Attach GC trigger to files bucket/path changes
drop trigger if exists trg_files_gc on public.files;
create trigger trg_files_gc
after delete or update of bucket, path on public.files
for each row execute function public._storage_gc_sql();
-- 5) Document artefacts GC: remove artefact objects from storage when rows change/delete
-- Looks up the owning files row to learn which bucket the artefact lives in,
-- then deletes the artefact's storage object(s) at old.rel_path.
create or replace function public._artefact_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
v_bucket text;  -- bucket of the owning files row, if that row still exists
begin
if tg_op = 'DELETE' then
-- NOTE(review): when this delete cascades from deleting the parent files
-- row, that row is already gone here, v_bucket stays null, and
-- _delete_storage_objects no-ops -- confirm the prefix GC covers that case.
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
return old;
elsif tg_op = 'UPDATE' then
-- Only GC when the artefact was re-pointed at a different file or path
if (old.rel_path is distinct from new.rel_path) or (old.file_id is distinct from new.file_id) then
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
end if;
return new;
end if;
end
$$;
drop trigger if exists trg_document_artefacts_gc on public.document_artefacts;
-- BEFORE trigger so OLD values are still available and the returned row
-- controls whether the operation proceeds
create trigger trg_document_artefacts_gc
before delete or update of file_id, rel_path on public.document_artefacts
for each row execute function public._artefact_gc_sql();

View File

@ -0,0 +1,84 @@
-- Enable RLS and define policies for filesystem tables
-- 1) Enable RLS
alter table if exists public.file_cabinets enable row level security;
alter table if exists public.files enable row level security;
alter table if exists public.brain_files enable row level security;
alter table if exists public.document_artefacts enable row level security;
-- 2) Cabinets: owner has full access.
-- NOTE(review): no FOR/TO clause, so this permissive policy applies to every
-- command and every role that can reach the table -- confirm that is intended.
drop policy if exists "User can access own cabinets" on public.file_cabinets;
create policy "User can access own cabinets" on public.file_cabinets
using (user_id = auth.uid())
with check (user_id = auth.uid());
-- 3) Files: cabinet owner has full access (FOR ALL policy), plus explicit
-- per-command policies for authenticated users below. Permissive policies are
-- OR-ed, so the command-specific ones are effectively subsumed by this one;
-- they are kept for explicitness.
drop policy if exists "User can access files in own cabinet" on public.files;
create policy "User can access files in own cabinet" on public.files
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can insert files into own cabinet" on public.files;
create policy "User can insert files into own cabinet" on public.files for insert to authenticated
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can update files in own cabinet" on public.files;
create policy "User can update files in own cabinet" on public.files for update to authenticated
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can delete files from own cabinet" on public.files;
create policy "User can delete files from own cabinet" on public.files for delete
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
-- 4) Brain-files: allow linking owned files to owned brains
-- Fix: the policy previously had WITH CHECK (true), which meant the USING
-- ownership checks applied only to reads/updates/deletes while INSERT was
-- unrestricted -- any user could link any file to any brain. The WITH CHECK
-- clause now mirrors USING so writes require owning both the brain and file.
drop policy if exists "User can link files they own to their brains" on public.brain_files;
create policy "User can link files they own to their brains" on public.brain_files
using (
exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
and exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = brain_files.file_id and c.user_id = auth.uid()
)
)
with check (
exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
and exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = brain_files.file_id and c.user_id = auth.uid()
)
);
-- 5) Document artefacts: allow reads to owners via file cabinet, writes via service_role
-- Ownership is derived transitively: artefact -> files -> file_cabinets.user_id.
drop policy if exists "artefacts_read_by_owner" on public.document_artefacts;
create policy "artefacts_read_by_owner" on public.document_artefacts for select to authenticated
using (exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));
-- Backend (service_role) bypass: full read/write for ingestion pipelines
drop policy if exists "artefacts_rw_service" on public.document_artefacts;
create policy "artefacts_rw_service" on public.document_artefacts to service_role
using (true) with check (true);
-- Allow owners to delete their artefacts (needed for cascades under RLS)
drop policy if exists "artefacts_delete_by_owner" on public.document_artefacts;
create policy "artefacts_delete_by_owner" on public.document_artefacts for delete to authenticated
using (exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));
-- File vectors RLS and policies are defined in 67-vectors.sql after the table is created

View File

@ -0,0 +1,79 @@
-- Vectors: file_vectors table and similarity search function
-- 1) Ensure pgvector extension is available
create extension if not exists vector;
-- 2) File vectors table
-- NOTE(review): embedding is declared without a dimension, so rows may hold
-- vectors of differing lengths and an ANN index cannot be built yet.
create table if not exists public.file_vectors (
id bigint generated by default as identity primary key,
created_at timestamp with time zone default now() not null,
embedding public.vector,
metadata jsonb,
content text
);
-- 3) ANN index (skipped until embedding dimension is fixed)
-- To enable: set column type to public.vector(<dim>) and uncomment:
-- create index if not exists file_vectors_embedding_idx
-- on public.file_vectors using ivfflat (embedding public.vector_cosine_ops)
-- with (lists='100');
-- 3b) Enable RLS and set policies (moved here to avoid ordering issues)
alter table if exists public.file_vectors enable row level security;
drop policy if exists "vectors_read_by_owner" on public.file_vectors;
-- Owners may read vectors whose metadata ties them to a file in one of their
-- cabinets; vectors carrying no file_id metadata remain readable by any
-- authenticated user (unchanged behavior).
-- Fix: the old predicate was coalesce((metadata->>'file_id')::uuid, null) is null.
-- coalesce(x, null) is a no-op, and casting to uuid purely to test for NULL
-- raised an error whenever the metadata value was malformed. A plain NULL test
-- is equivalent for valid data and cannot raise.
create policy "vectors_read_by_owner" on public.file_vectors for select to authenticated
using ((metadata->>'file_id') is null or exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = (metadata->>'file_id')::uuid and c.user_id = auth.uid()
));
-- Backend (service_role) bypass: full read/write for the embedding pipeline
drop policy if exists "vectors_rw_service" on public.file_vectors;
create policy "vectors_rw_service" on public.file_vectors to service_role
using (true) with check (true);
-- 4) Match function mirrored from neoFS (generic metadata mapping)
-- Cosine-similarity search over file_vectors, optionally narrowed by keys in
-- the jsonb "filter" argument. A filter key is applied only when present; the
-- *_ilike variants do case-insensitive prefix/pattern matching.
create or replace function public.match_file_vectors(
filter jsonb,
match_count integer,
query_embedding public.vector
)
returns table (
id bigint,
file_id uuid,
cabinet_id uuid,
artefact_type text,
artefact_is text,
original_path_prefix text,
original_filename text,
content text,
metadata jsonb,
similarity double precision
)
language sql stable as $$
select
fv.id,
nullif(fv.metadata->>'file_id','')::uuid as file_id,
nullif(fv.metadata->>'cabinet_id','')::uuid as cabinet_id,
nullif(fv.metadata->>'artefact_type','') as artefact_type,
-- NOTE(review): the output column maps metadata key 'artefact_is' while the
-- filter below uses key 'artefact_id' -- confirm this asymmetry is intentional.
nullif(fv.metadata->>'artefact_is','') as artefact_is,
nullif(fv.metadata->>'original_path_prefix','') as original_path_prefix,
nullif(fv.metadata->>'original_filename','') as original_filename,
fv.content,
fv.metadata,
-- <=> is pgvector cosine distance; 1 - distance gives similarity
1 - (fv.embedding <=> query_embedding) as similarity
from public.file_vectors fv
where
(coalesce(filter ? 'file_id', false) = false or (fv.metadata->>'file_id')::uuid = (filter->>'file_id')::uuid)
and (coalesce(filter ? 'cabinet_id', false) = false or (fv.metadata->>'cabinet_id')::uuid = (filter->>'cabinet_id')::uuid)
and (coalesce(filter ? 'artefact_type', false) = false or (fv.metadata->>'artefact_type') = (filter->>'artefact_type'))
and (coalesce(filter ? 'artefact_id', false) = false or (fv.metadata->>'artefact_id') = (filter->>'artefact_id'))
and (coalesce(filter ? 'original_path_prefix', false) = false or (fv.metadata->>'original_path_prefix') like (filter->>'original_path_prefix') || '%')
and (coalesce(filter ? 'original_path_prefix_ilike', false)= false or (fv.metadata->>'original_path_prefix') ilike (filter->>'original_path_prefix_ilike') || '%')
and (coalesce(filter ? 'original_filename', false) = false or (fv.metadata->>'original_filename') = (filter->>'original_filename'))
and (coalesce(filter ? 'original_filename_ilike', false)= false or (fv.metadata->>'original_filename') ilike (filter->>'original_filename_ilike'))
order by fv.embedding <=> query_embedding
-- Default to 10 results; clamp non-positive/null match_count to at least 1
limit greatest(coalesce(match_count, 10), 1)
$$;

View File

@ -0,0 +1,73 @@
-- Cabinet memberships for sharing access
-- Each row grants one profile a role on one cabinet; a profile can hold at
-- most one membership per cabinet (unique constraint below).
create table if not exists public.cabinet_memberships (
id uuid default uuid_generate_v4() primary key,
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
profile_id uuid not null references public.profiles(id) on delete cascade,
role text not null check (role in ('owner','editor','viewer')),
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(cabinet_id, profile_id)
);
-- Lookup indexes for the two foreign keys used by the policies below
create index if not exists idx_cabinet_memberships_cabinet on public.cabinet_memberships(cabinet_id);
create index if not exists idx_cabinet_memberships_profile on public.cabinet_memberships(profile_id);
-- Updated at trigger (handle_updated_at defined in an earlier migration)
drop trigger if exists trg_cabinet_memberships_updated_at on public.cabinet_memberships;
create trigger trg_cabinet_memberships_updated_at
before update on public.cabinet_memberships
for each row execute function public.handle_updated_at();
-- RLS and policies
alter table if exists public.cabinet_memberships enable row level security;
-- Members can select their own memberships; cabinet owners can also see memberships
drop policy if exists cm_read_self_or_owner on public.cabinet_memberships;
create policy cm_read_self_or_owner on public.cabinet_memberships for select to authenticated
using (
profile_id = auth.uid() or exists (
select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
)
);
-- Cabinet owners can insert memberships
drop policy if exists cm_insert_by_owner on public.cabinet_memberships;
create policy cm_insert_by_owner on public.cabinet_memberships for insert to authenticated
with check (exists (
select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));
-- Cabinet owners can update memberships (e.g., role)
drop policy if exists cm_update_by_owner on public.cabinet_memberships;
create policy cm_update_by_owner on public.cabinet_memberships for update to authenticated
using (exists (
select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));
-- Cabinet owners can delete memberships
drop policy if exists cm_delete_by_owner on public.cabinet_memberships;
create policy cm_delete_by_owner on public.cabinet_memberships for delete to authenticated
using (exists (
select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));
-- Extend access to cabinets/files for members (after table exists)
-- NOTE(review): this file_cabinets policy queries cabinet_memberships, whose
-- own policies query file_cabinets. Circular policy references can raise
-- "infinite recursion detected in policy" at query time -- confirm, and
-- consider a SECURITY DEFINER helper function if it does.
drop policy if exists "User can access cabinets via membership" on public.file_cabinets;
create policy "User can access cabinets via membership" on public.file_cabinets for select to authenticated
using (exists (
select 1 from public.cabinet_memberships m
where m.cabinet_id = file_cabinets.id and m.profile_id = auth.uid()
));
-- Members get read access to files in shared cabinets (writes remain
-- owner-only via the earlier files policies)
drop policy if exists "User can access files via membership" on public.files;
create policy "User can access files via membership" on public.files for select to authenticated
using (exists (
select 1 from public.cabinet_memberships m
where m.cabinet_id = files.cabinet_id and m.profile_id = auth.uid()
));

View File

@ -0,0 +1,48 @@
-- Ensure storage objects for all artefacts are removed when a file is deleted
-- by deleting the entire "cabinet_id/file_id" directory prefix in Storage.
-- Helper to delete all objects under a prefix
create or replace function public._delete_storage_prefix(p_bucket text, p_prefix text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
  v_like text;
begin
  if p_bucket is null or p_prefix is null then
    return;
  end if;
  -- Fix: escape LIKE wildcards before building the pattern. Prefixes may
  -- contain '_' or '%'; unescaped, those act as wildcards and can match --
  -- and delete -- objects outside the intended directory.
  v_like := replace(replace(replace(p_prefix, '\', '\\'), '%', '\%'), '_', '\_');
  -- Delete any objects whose name starts with the prefix + '/'
  delete from storage.objects where bucket_id = p_bucket and name like v_like || '/%';
  -- In case an object exists exactly at the prefix (rare but safe)
  delete from storage.objects where bucket_id = p_bucket and name = p_prefix;
end
$$;
-- Update file-level GC to also delete the parent directory prefix (cabinet_id/file_id)
-- NOTE(review): this CREATE OR REPLACE overrides the earlier definition of
-- public._storage_gc_sql from the files-GC script; this version must run last.
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
v_prefix text;  -- directory containing the file's artefacts
begin
-- Derive directory prefix from the file path by removing the last segment (filename)
-- Example: 'cabinet_id/file_id/filename.ext' -> 'cabinet_id/file_id'
v_prefix := regexp_replace(old.path, '/[^/]+$', '');
if tg_op = 'DELETE' then
-- Delete the original object and any artefacts under the file's directory
perform public._delete_storage_objects(old.bucket, old.path);
perform public._delete_storage_prefix(old.bucket, v_prefix);
elsif tg_op = 'UPDATE' then
-- Only GC when the object's location actually changed
if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
perform public._delete_storage_objects(old.bucket, old.path);
perform public._delete_storage_prefix(old.bucket, v_prefix);
end if;
end if;
-- AFTER trigger: the return value is ignored
return null;
end
$$;

View File

@ -0,0 +1,41 @@
-- Add directory support to files table
-- Migration: Add directory support for folder uploads
-- Add new columns to files table
-- Fix: schema-qualify "files" as "public.files" for consistency with the other
-- migrations and so the script does not depend on the session search_path.
ALTER TABLE public.files
ADD COLUMN IF NOT EXISTS is_directory BOOLEAN DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS parent_directory_id UUID REFERENCES public.files(id) ON DELETE CASCADE,
ADD COLUMN IF NOT EXISTS relative_path TEXT,
ADD COLUMN IF NOT EXISTS directory_manifest JSONB,
ADD COLUMN IF NOT EXISTS upload_session_id UUID,
ADD COLUMN IF NOT EXISTS processing_status TEXT DEFAULT 'uploaded' CHECK (processing_status IN ('uploaded', 'processing', 'completed', 'failed', 'queued'));
-- Create index for efficient directory queries
CREATE INDEX IF NOT EXISTS idx_files_parent_directory ON public.files(parent_directory_id);
CREATE INDEX IF NOT EXISTS idx_files_upload_session ON public.files(upload_session_id);
CREATE INDEX IF NOT EXISTS idx_files_processing_status ON public.files(processing_status);
CREATE INDEX IF NOT EXISTS idx_files_is_directory ON public.files(is_directory);
-- Document the new columns in the catalog
COMMENT ON COLUMN public.files.is_directory IS 'True if this record represents a directory/folder';
COMMENT ON COLUMN public.files.parent_directory_id IS 'ID of parent directory if this file is inside an uploaded folder';
COMMENT ON COLUMN public.files.relative_path IS 'Relative path within the uploaded directory structure';
COMMENT ON COLUMN public.files.directory_manifest IS 'JSON manifest of directory contents including file count, total size, structure';
COMMENT ON COLUMN public.files.upload_session_id IS 'Groups files uploaded together in a single directory upload session';
COMMENT ON COLUMN public.files.processing_status IS 'Simple status tracking without auto-processing';
-- Example directory_manifest structure:
-- {
--   "total_files": 15,
--   "total_size_bytes": 12345678,
--   "directory_structure": {
--     "documents/": {
--       "file1.pdf": {"size": 123456, "mime_type": "application/pdf"},
--       "subdirectory/": {
--         "file2.docx": {"size": 234567, "mime_type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document"}
--       }
--     }
--   },
--   "upload_timestamp": "2024-09-23T12:00:00Z",
--   "upload_method": "directory_picker"
-- }

View File

@ -0,0 +1,7 @@
-- Create _supabase database for internal Supabase operations
-- This database is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual database creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _supabase database is created with the postgres user as owner
-- by default during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _analytics schema for Supabase analytics
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _analytics schema is created in the _supabase database
-- with appropriate ownership during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _realtime schema for Supabase realtime functionality
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _realtime schema is created with appropriate ownership
-- during Supabase initialization

View File

@ -0,0 +1,7 @@
-- Create _supavisor schema for Supabase connection pooling
-- This schema is created automatically by Supabase's internal setup
-- This file is kept for reference but the actual schema creation is managed
-- through the Supabase configuration and environment variables
-- Note: The _supavisor schema is created in the _supabase database
-- with appropriate ownership during Supabase initialization

View File

@ -1,487 +0,0 @@
services:
  # Supabase containers
  # Dashboard UI for managing the local Supabase stack
  studio:
    container_name: supabase-studio
    image: supabase/studio:20250113-83c9420
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "fetch('http://studio:3000/api/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})",
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    ports:
      - ${PORT_SUPABASE_STUDIO}:3000
    env_file:
      - .env
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DEFAULT_PROJECT_ID: "ClassroomCopilot"
      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      SUPABASE_URL: ${SUPABASE_URL}
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
    networks:
      - kevlarai-network
  # API gateway: single public entry point routing to auth/rest/realtime/
  # storage/functions, guarded by key-auth + ACL plugins
  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    # Substitute env vars into the mounted template before starting Kong
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    ports:
      # NOTE(review): uses KONG_HTTP_PORT/KONG_HTTPS_PORT while other services
      # use PORT_SUPABASE_* names -- confirm both variables exist in .env
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    depends_on:
      analytics:
        condition: service_healthy
    env_file:
      - .env
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
      KONG_PROXY_ACCESS_LOG: "/dev/stdout"
      KONG_ADMIN_ACCESS_LOG: "/dev/stdout"
      KONG_PROXY_ERROR_LOG: "/dev/stderr"
      KONG_ADMIN_ERROR_LOG: "/dev/stderr"
      KONG_CORS_ORIGINS: "*"
      KONG_CORS_METHODS: "GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS"
      KONG_CORS_HEADERS: "DNT,X-Auth-Token,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,apikey,x-client-info"
      KONG_CORS_EXPOSED_HEADERS: "Content-Length,Content-Range"
      KONG_CORS_MAX_AGE: 3600
    volumes:
      - ./api/kong.yml:/home/kong/temp.yml:ro
    networks:
      - kevlarai-network
auth:
container_name: supabase-auth
image: supabase/gotrue:v2.167.0
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9999/health",
]
timeout: 5s
interval: 5s
retries: 3
restart: unless-stopped
env_file:
- .env
environment:
GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
GOTRUE_SITE_URL: ${SITE_URL}
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
GOTRUE_JWT_ADMIN_ROLES: service_role
GOTRUE_JWT_AUD: authenticated
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
GOTRUE_JWT_SECRET: ${JWT_SECRET}
GOTRUE_LOG_LEVEL: ${AUTH_LOG_LEVEL}
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
GOTRUE_SMTP_HOST: ${SMTP_HOST}
GOTRUE_SMTP_PORT: ${SMTP_PORT}
GOTRUE_SMTP_USER: ${SMTP_USER}
GOTRUE_SMTP_PASS: ${SMTP_PASS}
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: ${MAILER_SECURE_EMAIL_CHANGE_ENABLED}
GOTRUE_MAILER_EXTERNAL_HOSTS: "localhost,admin.localhost,kong,supabase.classroomcopilot.ai,classroomcopilot.ai"
GOTRUE_MAILER_EXTERNAL_HOSTS_ALLOW_REGEX: ".*\\.classroomcopilot\\.ai$"
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
GOTRUE_EXTERNAL_AZURE_ENABLED: ${AZURE_ENABLED}
GOTRUE_EXTERNAL_AZURE_CLIENT_ID: ${AZURE_CLIENT_ID}
GOTRUE_EXTERNAL_AZURE_SECRET: ${AZURE_SECRET}
GOTRUE_EXTERNAL_AZURE_REDIRECT_URI: ${AZURE_REDIRECT_URI}
networks:
- kevlarai-network
rest:
container_name: supabase-rest
image: postgrest/postgrest:v12.2.0
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
restart: unless-stopped
env_file:
- .env
environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
PGRST_DB_ANON_ROLE: anon
PGRST_JWT_SECRET: ${JWT_SECRET}
PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
command: "postgrest"
networks:
- kevlarai-network
realtime:
container_name: supabase-realtime
image: supabase/realtime:v2.34.7
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"-H",
"Authorization: Bearer ${ANON_KEY}",
"http://localhost:4000/api/tenants/realtime-dev/health",
]
timeout: 5s
interval: 5s
retries: 3
restart: unless-stopped
env_file:
- .env
environment:
PORT: 4000
DB_HOST: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_USER: supabase_admin
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_NAME: ${POSTGRES_DB}
DB_AFTER_CONNECT_QUERY: "SET search_path TO _realtime"
DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET}
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
ERL_AFLAGS: -proto_dist inet_tcp
DNS_NODES: "''"
RLIMIT_NOFILE: "10000"
APP_NAME: realtime
SEED_SELF_HOST: true
RUN_JANITOR: true
networks:
- kevlarai-network
storage:
container_name: supabase-storage
image: supabase/storage-api:v1.14.5
depends_on:
db:
condition: service_healthy
rest:
condition: service_started
imgproxy:
condition: service_started
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://storage:5000/status",
]
timeout: 5s
interval: 5s
retries: 3
restart: unless-stopped
env_file:
- .env
environment:
ANON_KEY: ${ANON_KEY}
SERVICE_KEY: ${SERVICE_ROLE_KEY}
POSTGREST_URL: http://rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
FILE_SIZE_LIMIT: 52428800
STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: stub
REGION: stub
GLOBAL_S3_BUCKET: stub
ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://imgproxy:5001
volumes:
- ./storage:/var/lib/storage:z
networks:
- kevlarai-network
imgproxy:
container_name: supabase-imgproxy
image: darthsim/imgproxy:v3.8.0
healthcheck:
test: ["CMD", "imgproxy", "health"]
timeout: 10s
interval: 5s
retries: 10
env_file:
- .env
environment:
IMGPROXY_BIND: ":5001"
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
IMGPROXY_USE_ETAG: "true"
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
networks:
- kevlarai-network
meta:
container_name: supabase-meta
image: supabase/postgres-meta:v0.84.2
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
restart: unless-stopped
env_file:
- .env
environment:
PG_META_PORT: 8080
PG_META_DB_HOST: ${POSTGRES_HOST}
PG_META_DB_PORT: ${POSTGRES_PORT}
PG_META_DB_NAME: ${POSTGRES_DB}
PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
networks:
- kevlarai-network
functions:
container_name: supabase-edge-functions
image: supabase/edge-runtime:v1.67.0
restart: unless-stopped
depends_on:
analytics:
condition: service_healthy
env_file:
- .env
environment:
JWT_SECRET: ${JWT_SECRET}
SUPABASE_URL: ${SUPABASE_URL}
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
volumes:
- ./functions:/home/deno/functions:Z
command:
- start
- --main-service
- /home/deno/functions/main
networks:
- kevlarai-network
analytics:
container_name: supabase-analytics
image: supabase/logflare:1.4.0
healthcheck:
test: ["CMD", "curl", "http://localhost:4000/health"]
timeout: 10s
interval: 5s
retries: 10
restart: unless-stopped
depends_on:
db:
condition: service_healthy
env_file:
- .env
environment:
LOGFLARE_NODE_HOST: 127.0.0.1
DB_USERNAME: supabase_admin
DB_DATABASE: _supabase
DB_HOSTNAME: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_SCHEMA: _analytics
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
LOGFLARE_SINGLE_TENANT: true
LOGFLARE_SUPABASE_MODE: true
LOGFLARE_MIN_CLUSTER_SIZE: 1
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
POSTGRES_BACKEND_SCHEMA: _analytics
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
ports:
- 4000:4000
networks:
- kevlarai-network
db:
container_name: supabase-db
image: supabase/postgres:15.8.1.020
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -h localhost || exit 1"]
interval: 10s
timeout: 5s
retries: 20
start_period: 30s
depends_on:
vector:
condition: service_healthy
command:
- postgres
- -c
- config_file=/etc/postgresql/postgresql.conf
- -c
- log_min_messages=fatal
restart: unless-stopped
env_file:
- .env
environment:
POSTGRES_HOST: /var/run/postgresql
PGPORT: ${POSTGRES_PORT}
POSTGRES_PORT: ${POSTGRES_PORT}
PGPASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PGDATABASE: ${POSTGRES_DB}
POSTGRES_DB: ${POSTGRES_DB}
JWT_SECRET: ${JWT_SECRET}
JWT_EXP: ${JWT_EXPIRY}
volumes:
- ./db/migrations/supabase/50-_supabase.sql:/docker-entrypoint-initdb.d/migrations/50-_supabase.sql
- ./db/migrations/supabase/52-realtime.sql:/docker-entrypoint-initdb.d/migrations/52-realtime.sql
- ./db/migrations/supabase/52-pooler.sql:/docker-entrypoint-initdb.d/migrations/52-pooler.sql
- ./db/migrations/supabase/52-logs.sql:/docker-entrypoint-initdb.d/migrations/52-logs.sql
- ./db/init-scripts/51-webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/51-webhooks.sql
- ./db/init-scripts/52-roles.sql:/docker-entrypoint-initdb.d/init-scripts/52-roles.sql
- ./db/init-scripts/52-jwt.sql:/docker-entrypoint-initdb.d/init-scripts/52-jwt.sql
- ./db/migrations/core/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql
- ./db/migrations/core/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql
- ./db/migrations/core/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql
- ./db/migrations/core/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql
- supabase-db-data:/var/lib/postgresql/data
- supabase-db-config:/etc/postgresql-custom
networks:
- kevlarai-network
vector:
container_name: supabase-vector
image: timberio/vector:0.28.1-alpine
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://vector:9001/health",
]
timeout: 10s
interval: 10s
retries: 10
volumes:
- ./logs/vector.yml:/etc/vector/vector.yml:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
env_file:
- .env
environment:
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
command: ["--config", "/etc/vector/vector.yml"]
networks:
- kevlarai-network
supavisor:
container_name: supabase-pooler
image: supabase/supavisor:1.1.56
healthcheck:
test: curl -sSfL --head -o /dev/null "http://127.0.0.1:4000/api/health"
interval: 10s
timeout: 10s
retries: 10
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
command:
- /bin/sh
- -c
- /app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server
restart: unless-stopped
ports:
- ${POSTGRES_PORT}:5432
- ${POOLER_PROXY_PORT_TRANSACTION}:6543
env_file:
- .env
environment:
- PORT=4000
- POSTGRES_PORT=${POSTGRES_PORT}
- POSTGRES_DB=${POSTGRES_DB}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- DATABASE_URL=ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
- CLUSTER_POSTGRES=true
- SECRET_KEY_BASE=${SECRET_KEY_BASE}
- VAULT_ENC_KEY=${VAULT_ENC_KEY}
- API_JWT_SECRET=${JWT_SECRET}
- METRICS_JWT_SECRET=${JWT_SECRET}
- REGION=local
- ERL_AFLAGS=-proto_dist inet_tcp
- POOLER_TENANT_ID=${POOLER_TENANT_ID}
- POOLER_DEFAULT_POOL_SIZE=${POOLER_DEFAULT_POOL_SIZE}
- POOLER_MAX_CLIENT_CONN=${POOLER_MAX_CLIENT_CONN}
- POOLER_POOL_MODE=transaction
volumes:
- ./pooler/pooler.exs:/etc/pooler/pooler.exs:ro
networks:
- kevlarai-network
volumes:
supabase-db-config:
driver: local
supabase-db-data:
driver: local
networks:
kevlarai-network:
name: kevlarai-network
driver: bridge

View File

@ -1,150 +0,0 @@
{
"name": "supabase",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"jsonwebtoken": "^9.0.2"
}
},
"node_modules/buffer-equal-constant-time": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
"license": "BSD-3-Clause"
},
"node_modules/ecdsa-sig-formatter": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
"integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==",
"license": "Apache-2.0",
"dependencies": {
"safe-buffer": "^5.0.1"
}
},
"node_modules/jsonwebtoken": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz",
"integrity": "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==",
"license": "MIT",
"dependencies": {
"jws": "^3.2.2",
"lodash.includes": "^4.3.0",
"lodash.isboolean": "^3.0.3",
"lodash.isinteger": "^4.0.4",
"lodash.isnumber": "^3.0.3",
"lodash.isplainobject": "^4.0.6",
"lodash.isstring": "^4.0.1",
"lodash.once": "^4.0.0",
"ms": "^2.1.1",
"semver": "^7.5.4"
},
"engines": {
"node": ">=12",
"npm": ">=6"
}
},
"node_modules/jwa": {
"version": "1.4.2",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.2.tgz",
"integrity": "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==",
"license": "MIT",
"dependencies": {
"buffer-equal-constant-time": "^1.0.1",
"ecdsa-sig-formatter": "1.0.11",
"safe-buffer": "^5.0.1"
}
},
"node_modules/jws": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz",
"integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==",
"license": "MIT",
"dependencies": {
"jwa": "^1.4.1",
"safe-buffer": "^5.0.1"
}
},
"node_modules/lodash.includes": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz",
"integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==",
"license": "MIT"
},
"node_modules/lodash.isboolean": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz",
"integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==",
"license": "MIT"
},
"node_modules/lodash.isinteger": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz",
"integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==",
"license": "MIT"
},
"node_modules/lodash.isnumber": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz",
"integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==",
"license": "MIT"
},
"node_modules/lodash.isplainobject": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
"integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==",
"license": "MIT"
},
"node_modules/lodash.isstring": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
"integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==",
"license": "MIT"
},
"node_modules/lodash.once": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz",
"integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==",
"license": "MIT"
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/semver": {
"version": "7.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
"integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
}
}
}

View File

@ -1,5 +0,0 @@
{
"dependencies": {
"jsonwebtoken": "^9.0.2"
}
}

7
.gitignore vendored
View File

@ -3,8 +3,11 @@
.env.* .env.*
!.env.example !.env.example
# Docker volumes (large runtime data) # Docker volume RUNTIME data (large binary/runtime files - not schema SQL)
volumes/ volumes/db/data/
volumes/storage/
volumes/pooler/
volumes/logs/
# Backup files # Backup files
*.bak *.bak

View File

@ -1 +0,0 @@
v2.75.0

View File

@ -1,305 +0,0 @@
# For detailed configuration reference documentation, visit:
# https://supabase.com/docs/guides/local-development/cli/config
# A string used to distinguish different Supabase projects on the same host. Defaults to the
# working directory name when running `supabase init`.
project_id = "ClassroomCopilot"
[api]
enabled = true
# Port to use for the API URL.
port = 8000
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"]
# Extra schemas to add to the search_path of every request.
extra_search_path = ["public", "extensions"]
# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
# for accidental or malicious requests.
max_rows = 1000
[api.tls]
# Enable HTTPS endpoints locally using a self-signed certificate.
enabled = false
[db]
# Port to use for the local database URL.
port = 5432
# Port used by db diff command to initialize the shadow database.
shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW
# server_version;` on the remote database to check.
major_version = 15
[db.pooler]
enabled = false
# Port to use for the local connection pooler.
port = 54329
# Specifies when a server connection can be reused by other clients.
# Configure one of the supported pooler modes: `transaction`, `session`.
pool_mode = "transaction"
# How many server connections to allow per user/database pair.
default_pool_size = 20
# Maximum number of client connections allowed.
max_client_conn = 100
[db.vault]
#secret_key = "mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc"
[db.migrations]
# Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql"
]
[db.seed]
# If enabled, seeds the database after migrations during a db reset.
enabled = true
# Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql",
"./db/init/seed.sql"
]
[realtime]
enabled = true
# Bind realtime via either IPv4 or IPv6. (default: IPv4)
# ip_version = "IPv6"
# The maximum length in bytes of HTTP request headers. (default: 4096)
# max_header_length = 4096
[studio]
enabled = true
# Port to use for Supabase Studio.
port = 3000
# External URL of the API server that frontend connects to.
api_url = "http://localhost"
# OpenAI API Key to use for Supabase AI in the Supabase Studio.
# SECURITY: never commit a live API key. Use the env() substitution this file
# already uses for other secrets (see [auth.sms.twilio] and [auth.external.*])
# and provide OPENAI_API_KEY via the environment. The previously committed key
# must be considered leaked and should be rotated.
openai_api_key = "env(OPENAI_API_KEY)"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface.
[inbucket]
enabled = true
# Port to use for the email testing server web interface.
port = 54324
# Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325
# pop3_port = 54326
admin_email = "admin@classroomcopilot.ai"
sender_name = "Super Admin"
[storage]
enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"
# Image transformation API is available to Supabase Pro plan.
# [storage.image_transformation]
# enabled = true
# Uncomment to configure local storage buckets
# [storage.buckets.images]
# public = false
# file_size_limit = "50MiB"
# allowed_mime_types = ["image/png", "image/jpeg"]
# objects_path = "./images"
[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails.
site_url = "http://localhost"
# Force JWT issuer to match site_url
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["http://localhost", "http://127.0.0.1"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600
# If disabled, the refresh token will never expire.
enable_refresh_token_rotation = true
# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
# Requires enable_refresh_token_rotation = true.
refresh_token_reuse_interval = 10
# Allow/disallow new user signups to your project.
enable_signup = true
# Allow/disallow anonymous sign-ins to your project.
enable_anonymous_sign_ins = false
# Allow/disallow testing manual linking of accounts
enable_manual_linking = false
# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more.
minimum_password_length = 6
# Passwords that do not meet the following requirements will be rejected as weak. Supported values
# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols`
password_requirements = ""
# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`.
# [auth.captcha]
# enabled = true
# provider = "hcaptcha"
# secret = ""
[auth.email]
# Allow/disallow new user signups via email to your project.
enable_signup = true
# If enabled, a user will be required to confirm any email change on both the old, and new email
# addresses. If disabled, only the new email is required to confirm.
double_confirm_changes = true
# If enabled, users need to confirm their email address before signing in.
enable_confirmations = false
# If enabled, users will need to reauthenticate or have logged in recently to change their password.
secure_password_change = false
# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
max_frequency = "1s"
# Number of characters used in the email OTP.
otp_length = 6
# Number of seconds before the email OTP expires (defaults to 1 hour).
otp_expiry = 3600
# Use a production-ready SMTP server
# [auth.email.smtp]
# enabled = true
# host = "smtp.sendgrid.net"
# port = 587
# user = "apikey"
# pass = "env(SENDGRID_API_KEY)"
# admin_email = "admin@email.com"
# sender_name = "Admin"
# Uncomment to customize email template
# [auth.email.template.invite]
# subject = "You have been invited"
# content_path = "./supabase/templates/invite.html"
[auth.sms]
# Allow/disallow new user signups via SMS to your project.
enable_signup = false
# If enabled, users need to confirm their phone number before signing in.
enable_confirmations = false
# Template for sending OTP to users
template = "Your code is {{ .Code }}"
# Controls the minimum amount of time that must pass before sending another sms otp.
max_frequency = "5s"
# Use pre-defined map of phone number to OTP for testing.
# [auth.sms.test_otp]
# 4152127777 = "123456"
# Configure logged in session timeouts.
# [auth.sessions]
# Force log out after the specified duration.
# timebox = "24h"
# Force log out if the user has been inactive longer than the specified duration.
# inactivity_timeout = "8h"
# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
# [auth.hook.custom_access_token]
# enabled = true
# uri = "pg-functions://<database>/<schema>/<hook_name>"
# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
[auth.sms.twilio]
enabled = false
account_sid = ""
message_service_sid = ""
# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
# Multi-factor-authentication is available to Supabase Pro plan.
[auth.mfa]
# Control how many MFA factors can be enrolled at once per user.
max_enrolled_factors = 10
# Control MFA via App Authenticator (TOTP)
[auth.mfa.totp]
enroll_enabled = false
verify_enabled = false
# Configure MFA via Phone Messaging
[auth.mfa.phone]
enroll_enabled = false
verify_enabled = false
otp_length = 6
template = "Your code is {{ .Code }}"
max_frequency = "5s"
# Configure MFA via WebAuthn
# [auth.mfa.web_authn]
# enroll_enabled = true
# verify_enabled = true
# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
[auth.external.apple]
enabled = false
client_id = ""
# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
# Overrides the default auth redirectUrl.
redirect_uri = ""
# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
# or any other third-party OIDC providers.
url = ""
# If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
skip_nonce_check = false
# Use Firebase Auth as a third-party provider alongside Supabase Auth.
[auth.third_party.firebase]
enabled = false
# project_id = "my-firebase-project"
# Use Auth0 as a third-party provider alongside Supabase Auth.
[auth.third_party.auth0]
enabled = false
# tenant = "my-auth0-tenant"
# tenant_region = "us"
# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth.
[auth.third_party.aws_cognito]
enabled = false
# user_pool_id = "my-user-pool-id"
# user_pool_region = "us-east-1"
[edge_runtime]
enabled = true
# Configure one of the supported request policies: `oneshot`, `per_worker`.
# Use `oneshot` for hot reload, or `per_worker` for load testing.
policy = "oneshot"
# Port to attach the Chrome inspector for debugging edge functions.
inspector_port = 8083
# Use these configurations to customize your Edge Function.
# [functions.MY_FUNCTION_NAME]
# enabled = true
# verify_jwt = true
# import_map = "./functions/MY_FUNCTION_NAME/deno.json"
# Uncomment to specify a custom file path to the entrypoint.
# Supported file extensions are: .ts, .js, .mjs, .jsx, .tsx
# entrypoint = "./functions/MY_FUNCTION_NAME/index.ts"
# Specifies static files to be bundled with the function. Supports glob patterns.
# For example, if you want to serve static HTML pages in your function:
# static_files = [ "./functions/MY_FUNCTION_NAME/*.html" ]
[analytics]
enabled = true
port = 54327
# Configure one of the supported backends: `postgres`, `bigquery`.
backend = "postgres"
# Experimental features may be deprecated any time
[experimental]
# Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = ""
# Configures S3 bucket region, eg. us-east-1
s3_region = ""
# Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = ""
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = ""

View File

@ -342,23 +342,9 @@ services:
image: supabase/postgres:15.8.1.060 image: supabase/postgres:15.8.1.060
restart: unless-stopped restart: unless-stopped
volumes: volumes:
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z - ./volumes/db:/docker-entrypoint-initdb.d:Z
# Must be superuser to create event trigger # PGDATA directory - persists database files between restarts
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z - ./volumes/db-data:/var/lib/postgresql/data:Z
# Must be superuser to alter reserved role
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
# Initialize the database settings with JWT_SECRET and JWT_EXP
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
# PGDATA directory is persisted between restarts
- ./volumes/db/data:/var/lib/postgresql/data:Z
# Changes required for internal supabase data such as _analytics
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
# Changes required for Analytics support
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
# Changes required for Pooler support
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
# Use named volume to persist pgsodium decryption key between restarts
- db-config:/etc/postgresql-custom
healthcheck: healthcheck:
test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ] test: [ "CMD", "pg_isready", "-U", "postgres", "-h", "localhost" ]
interval: 5s interval: 5s
@ -369,6 +355,7 @@ services:
condition: service_healthy condition: service_healthy
environment: environment:
POSTGRES_HOST: /var/run/postgresql POSTGRES_HOST: /var/run/postgresql
POSTGRES_USER: postgres
PGPORT: ${POSTGRES_PORT} PGPORT: ${POSTGRES_PORT}
POSTGRES_PORT: ${POSTGRES_PORT} POSTGRES_PORT: ${POSTGRES_PORT}
PGPASSWORD: ${POSTGRES_PASSWORD} PGPASSWORD: ${POSTGRES_PASSWORD}

279
volumes/api/kong.yml Normal file
View File

@ -0,0 +1,279 @@
_format_version: '2.1'
_transform: true

###
### Consumers / Users
###
# NOTE(review): $SUPABASE_ANON_KEY / $SUPABASE_SERVICE_KEY / $DASHBOARD_*
# look like shell-style placeholders substituted before Kong loads this file
# (Kong itself does not expand them) — verify the container entrypoint
# performs the substitution.
consumers:
  - username: DASHBOARD
  - username: anon
    keyauth_credentials:
      - key: $SUPABASE_ANON_KEY
  - username: service_role
    keyauth_credentials:
      - key: $SUPABASE_SERVICE_KEY

###
### Access Control List
###
# Maps consumers to ACL groups referenced by the per-route acl plugins below.
acls:
  - consumer: anon
    group: anon
  - consumer: service_role
    group: admin

###
### Dashboard credentials
###
basicauth_credentials:
  - consumer: DASHBOARD
    username: $DASHBOARD_USERNAME
    password: $DASHBOARD_PASSWORD
###
### API Routes
###
services:
## Open Auth routes
- name: auth-v1-open
url: http://auth:9999/verify
routes:
- name: auth-v1-open
strip_path: true
paths:
- /auth/v1/verify
plugins:
- name: cors
- name: auth-v1-open-callback
url: http://auth:9999/callback
routes:
- name: auth-v1-open-callback
strip_path: true
paths:
- /auth/v1/callback
plugins:
- name: cors
- name: auth-v1-open-authorize
url: http://auth:9999/authorize
routes:
- name: auth-v1-open-authorize
strip_path: true
paths:
- /auth/v1/authorize
plugins:
- name: cors
## Secure Auth routes
- name: auth-v1
_comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
url: http://auth:9999/
routes:
- name: auth-v1-all
strip_path: true
paths:
- /auth/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure REST routes
- name: rest-v1
_comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
url: http://rest:3000/
routes:
- name: rest-v1-all
strip_path: true
paths:
- /rest/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: true
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure GraphQL routes
- name: graphql-v1
_comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
url: http://rest:3000/rpc/graphql
routes:
- name: graphql-v1-all
strip_path: true
paths:
- /graphql/v1
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: true
- name: request-transformer
config:
add:
headers:
- Content-Profile:graphql_public
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure Realtime routes
- name: realtime-v1-ws
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
url: http://realtime-dev.supabase-realtime:4000/socket
protocol: ws
routes:
- name: realtime-v1-ws
strip_path: true
paths:
- /realtime/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1-rest
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
url: http://realtime-dev.supabase-realtime:4000/api
protocol: http
routes:
- name: realtime-v1-rest
strip_path: true
paths:
- /realtime/v1/api
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Storage routes: the storage server manages its own auth
- name: storage-v1
_comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
url: http://storage:5000/
routes:
- name: storage-v1-all
strip_path: true
paths:
- /storage/v1/
plugins:
- name: cors
## Edge Functions routes
- name: functions-v1
_comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
url: http://functions:9000/
routes:
- name: functions-v1-all
strip_path: true
paths:
- /functions/v1/
plugins:
- name: cors
## Analytics routes
- name: analytics-v1
_comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
url: http://analytics:4000/
routes:
- name: analytics-v1-all
strip_path: true
paths:
- /analytics/v1/
## Secure Database routes
- name: meta
_comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
url: http://meta:8080/
routes:
- name: meta-all
strip_path: true
paths:
- /pg/
plugins:
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
  ## MCP Server routes - Model Context Protocol for AI integrations
  ## Authentication is handled by the MCP server itself (JWT validation)
  - name: mcp-v1
    _comment: 'MCP Server: /mcp/v1/* -> http://mcp:3100/mcp'
    url: http://mcp:3100
    routes:
      - name: mcp-v1-all
        strip_path: true
        paths:
          - /mcp/v1/
    plugins:
      # Rewrite the stripped path to the server's single /mcp endpoint.
      - name: request-transformer
        config:
          replace:
            uri: /mcp
      - name: cors
        config:
          origins:
            - "http://localhost:3000"
            - "http://127.0.0.1:3000"
            # NOTE(review): hardcoded LAN origin — environment-specific;
            # confirm it is still needed or parameterize per deployment.
            - "http://192.168.0.94:50001"
          methods:
            - GET
            - POST
            - DELETE
            - OPTIONS
          headers:
            - Accept
            - Authorization
            - Content-Type
            - X-Client-Info
            - apikey
            - Mcp-Session-Id
          # Expose the session header so browser clients can persist sessions.
          exposed_headers:
            - Mcp-Session-Id
          credentials: true
          max_age: 3600
## Protected Dashboard - catch all remaining routes
#- name: dashboard
# _comment: 'Studio: /* -> http://studio:3000/*'
# url: http://studio:3000/
# routes:
# - name: dashboard-all
# strip_path: true
# paths:
# - /
# plugins:
# - name: cors
# - name: basic-auth
# config:
# hide_credentials: true

View File

@ -0,0 +1,82 @@
-- ============================================================
-- Supabase Core Roles & Schemas Initialization
-- Runs first (50-) to set up all roles required by later scripts
-- ============================================================
-- NOTE: the literal passwords below are bootstrap placeholders only; they are
-- overwritten with $POSTGRES_PASSWORD by 56-roles.sql later in the init
-- sequence. Do not rely on them after initialization.

-- Create supabase_admin role
DO
$$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_admin') THEN
        CREATE ROLE supabase_admin WITH LOGIN CREATEROLE REPLICATION BYPASSRLS PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
END
$$;

-- Create ALL standard Supabase roles needed by subsequent init scripts
-- (56-roles.sql will ALTER these, so they must pre-exist)
DO
$$
BEGIN
    -- API-facing roles assumed by PostgREST via the authenticator login role.
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'anon') THEN
        CREATE ROLE anon NOLOGIN NOINHERIT;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticated') THEN
        CREATE ROLE authenticated NOLOGIN NOINHERIT;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'service_role') THEN
        CREATE ROLE service_role NOLOGIN NOINHERIT BYPASSRLS;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticator') THEN
        CREATE ROLE authenticator WITH NOINHERIT LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
    -- Pooler auth role (see 55-pooler.sql get_auth()).
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'pgbouncer') THEN
        CREATE ROLE pgbouncer WITH LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
    -- Per-service admin roles used by GoTrue, Storage, and Edge Functions.
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_auth_admin') THEN
        CREATE ROLE supabase_auth_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_storage_admin') THEN
        CREATE ROLE supabase_storage_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_functions_admin') THEN
        CREATE ROLE supabase_functions_admin WITH NOINHERIT CREATEROLE LOGIN PASSWORD 'siqt3T9iHjWpjATtKdlBjJKOifiLf0Oe';
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_replication_admin') THEN
        CREATE ROLE supabase_replication_admin LOGIN REPLICATION;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_read_only_user') THEN
        CREATE ROLE supabase_read_only_user BYPASSRLS;
    END IF;
END
$$;

-- Grant pg_read_server_files to supabase_admin (required by pg_net extension)
GRANT pg_read_server_files TO supabase_admin;

-- Core grants
GRANT ALL ON DATABASE postgres TO supabase_admin WITH GRANT OPTION;
-- authenticator switches into these roles per-request (PostgREST model).
GRANT anon TO authenticator;
GRANT authenticated TO authenticator;
GRANT service_role TO authenticator;
GRANT supabase_auth_admin TO supabase_admin;
GRANT supabase_storage_admin TO supabase_admin;
GRANT supabase_functions_admin TO supabase_admin;

-- Create _supabase database for internal Supabase services
-- NOTE(review): CREATE DATABASE is not idempotent — this script assumes it
-- only ever runs against an empty PGDATA (docker-entrypoint-initdb.d).
CREATE DATABASE _supabase WITH OWNER supabase_admin;

-- Create required schemas in postgres database
CREATE SCHEMA IF NOT EXISTS _supabase AUTHORIZATION supabase_admin;
CREATE SCHEMA IF NOT EXISTS extensions AUTHORIZATION supabase_admin;

-- Stub schemas: auth/storage populated by GoTrue/Storage services at runtime
-- but must exist for 61-core-schema.sql to pass validation
CREATE SCHEMA IF NOT EXISTS auth;
CREATE SCHEMA IF NOT EXISTS storage;
GRANT USAGE ON SCHEMA auth TO supabase_admin, supabase_auth_admin;
GRANT USAGE ON SCHEMA storage TO supabase_admin, supabase_storage_admin;

-- Switch to _supabase database and create required schemas
\connect _supabase
CREATE SCHEMA IF NOT EXISTS _analytics AUTHORIZATION supabase_admin;

123
volumes/db/51-webhooks.sql Normal file
View File

@ -0,0 +1,123 @@
-- Create pg_net extension outside transaction (cannot run inside BEGIN/COMMIT)
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
BEGIN;
-- Create pg_net extension
-- pg_net extension created above (outside transaction)
-- Create the supabase_functions schema
CREATE SCHEMA IF NOT EXISTS supabase_functions AUTHORIZATION supabase_admin;
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition
CREATE TABLE supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
-- supabase_functions.hooks definition
CREATE TABLE supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Webhook request logs stored temporarily while awaiting the request.';
-- Generic webhook trigger function. Attach with:
--   CREATE TRIGGER … AFTER INSERT/UPDATE/DELETE … EXECUTE FUNCTION
--   supabase_functions.http_request(url, method, headers, params, timeout_ms)
-- Trigger arguments (TG_ARGV):
--   [0] url (required)  [1] method: 'GET' or 'POST' (required)
--   [2] headers jsonb   [3] params jsonb            [4] timeout_ms integer
-- Fires an async HTTP call via pg_net and logs the request id into
-- supabase_functions.hooks.
CREATE FUNCTION supabase_functions.http_request()
    RETURNS trigger
    LANGUAGE plpgsql
AS $func$
DECLARE
    request_id bigint;
    payload jsonb;
    url text := TG_ARGV[0]::text;
    method text := TG_ARGV[1]::text;
    headers jsonb DEFAULT '{}'::jsonb;
    params jsonb DEFAULT '{}'::jsonb;
    timeout_ms integer DEFAULT 1000;
BEGIN
    -- Trigger args arrive as text; a missing arg can surface as the literal
    -- string 'null', hence the double checks below.
    IF url IS NULL OR url = 'null' THEN
        RAISE EXCEPTION 'url argument is missing';
    END IF;

    IF method IS NULL OR method = 'null' THEN
        RAISE EXCEPTION 'method argument is missing';
    END IF;

    IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
        headers = '{}'::jsonb;
    ELSE
        headers = TG_ARGV[2]::jsonb;
    END IF;

    IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
        params = '{}'::jsonb;
    ELSE
        params = TG_ARGV[3]::jsonb;
    END IF;

    IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
        timeout_ms = 1000;
    ELSE
        timeout_ms = TG_ARGV[4]::integer;
    END IF;

    CASE
        WHEN method = 'GET' THEN
            SELECT http_get INTO request_id FROM net.http_get(
                url,
                params,
                headers,
                timeout_ms
            );
        WHEN method = 'POST' THEN
            -- POST body carries the standard webhook payload describing the
            -- triggering row change.
            payload = jsonb_build_object(
                'old_record', OLD,
                'record', NEW,
                'type', TG_OP,
                'table', TG_TABLE_NAME,
                'schema', TG_TABLE_SCHEMA
            );
            SELECT http_post INTO request_id FROM net.http_post(
                url,
                payload,
                headers,
                timeout_ms
            );
        ELSE
            RAISE EXCEPTION 'method argument % is invalid', method;
    END CASE;

    -- Record the dispatched request for correlation with pg_net's result
    -- tables (see COMMENT ON supabase_functions.hooks above).
    INSERT INTO supabase_functions.hooks
        (hook_table_id, hook_name, request_id)
    VALUES
        (TG_RELID, TG_NAME, request_id);

    RETURN NEW;
END
$func$;
-- Supabase super admin
DO
$$
BEGIN
IF NOT EXISTS (
SELECT FROM pg_catalog.pg_roles
WHERE rolname = 'supabase_functions_admin'
) THEN
CREATE ROLE supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
END IF;
END
$$;
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER function supabase_functions.http_request() OWNER TO supabase_functions_admin;
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183942_update_grants');
ALTER ROLE supabase_functions_admin SET search_path = supabase_functions;
COMMIT;

5
volumes/db/52-jwt.sql Normal file
View File

@ -0,0 +1,5 @@
-- Store JWT settings at the database level so PostgREST/GoTrue helpers can
-- read them via current_setting('app.settings.jwt_secret'), etc.
-- Values are pulled from the container environment via psql backtick eval.
-- NOTE(review): the project .env defines JWT_EXPIRY, but this script reads
-- $JWT_EXP — confirm the db container exports JWT_EXP (upstream Supabase
-- compose does), otherwise app.settings.jwt_exp will be empty.
\set jwt_secret `echo "$JWT_SECRET"`
\set jwt_exp `echo "$JWT_EXP"`
ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';

3
volumes/db/53-logs.sql Normal file
View File

@ -0,0 +1,3 @@
-- Create analytics/logs schema
-- NOTE(review): 50-init also creates _analytics in the _supabase database;
-- this one targets the database these init scripts run against — confirm
-- both are intended.
CREATE SCHEMA IF NOT EXISTS _analytics;
ALTER SCHEMA _analytics OWNER TO supabase_admin;

View File

@ -0,0 +1,3 @@
-- create realtime schema for Realtime RLS (already exists but just in case)
-- Idempotent: IF NOT EXISTS plus an unconditional owner reset.
CREATE SCHEMA IF NOT EXISTS _realtime;
ALTER SCHEMA _realtime OWNER TO supabase_admin;

13
volumes/db/55-pooler.sql Normal file
View File

@ -0,0 +1,13 @@
-- pgBouncer auth function
-- Called by the pooler (as role pgbouncer) to look up a user's password
-- hash. SECURITY DEFINER is required because pg_shadow is superuser-only;
-- access is correspondingly revoked from PUBLIC below.
CREATE OR REPLACE FUNCTION public.get_auth(p_usename TEXT) RETURNS TABLE(username TEXT, password TEXT) AS
$$
BEGIN
    -- WARNING-level log of every auth lookup; noisy but aids pooler debugging.
    RAISE WARNING 'get_auth() called for user: %', p_usename;
    RETURN QUERY
    SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow
    WHERE usename = p_usename;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Only the pgbouncer role may read credentials through this function.
REVOKE ALL ON FUNCTION public.get_auth(p_usename TEXT) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION public.get_auth(p_usename TEXT) TO pgbouncer;

9
volumes/db/56-roles.sql Normal file
View File

@ -0,0 +1,9 @@
-- NOTE: change to your own passwords for production environments
-- Reads POSTGRES_PASSWORD from the environment via psql backtick substitution
-- and applies the same password to every internal Supabase service role.
\set pgpass `echo "$POSTGRES_PASSWORD"`
ALTER USER supabase_admin WITH PASSWORD :'pgpass';
ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';

View File

@ -0,0 +1,345 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Core schema setup for ClassConcepts with neoFS filesystem integration
-- Dependencies: auth.users (Supabase Auth)
--[ 1. Extensions ]--
-- uuid_generate_v4() is used for primary-key defaults throughout this file.
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist
-- Holds wrapper functions exposed over PostgREST (e.g. rpc.setup_initial_admin).
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- Create exec_sql function for admin operations
-- WARNING: executes arbitrary SQL with the function owner's privileges
-- (SECURITY DEFINER) — it must never be callable by end-user roles.
create or replace function exec_sql(query text)
returns void as $$
begin
execute query;
end;
$$ language plpgsql security definer;
-- Fix: by default PUBLIC receives EXECUTE on new functions, which would hand
-- every role (including anon) arbitrary SQL execution as the owner.
-- Restrict execution to service_role only.
revoke execute on function exec_sql(text) from public, anon, authenticated;
grant execute on function exec_sql(text) to service_role;
-- Generic row trigger: stamps updated_at with the current UTC time on any
-- table that attaches this function BEFORE UPDATE.
CREATE OR REPLACE FUNCTION public.handle_updated_at()
RETURNS trigger AS $$
BEGIN
    NEW.updated_at := timezone('utc'::text, now());
    RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Row trigger for document artefacts: records the completion timestamp the
-- moment status transitions into 'completed' from any other value.
CREATE OR REPLACE FUNCTION public.set_completed_at()
RETURNS trigger AS $$
BEGIN
    IF NEW.status = 'completed' AND OLD.status != 'completed' THEN
        NEW.completed_at := now();
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
--[ 5. Core Tables ]--
-- Base user profiles
-- One row per auth.users row; removed automatically when the auth user is.
create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade,
email text not null unique,
user_type text not null check (
user_type in (
'teacher',
'student',
'email_teacher',
'email_student',
'developer',
'superadmin'
)
),
username text not null unique,
full_name text,
display_name text,
metadata jsonb default '{}'::jsonb,
-- Per-user / per-school graph database names (neoFS integration)
user_db_name text,
school_db_name text,
-- Bookkeeping for mirroring the profile into Neo4j
neo4j_sync_status text default 'pending' check (neo4j_sync_status in ('pending', 'ready', 'failed')),
neo4j_synced_at timestamp with time zone,
last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
-- NOTE(review): stale description — user_type also allows email_* variants,
-- 'developer' and 'superadmin' per the check constraint above.
comment on column public.profiles.user_type is 'Type of user: teacher or student';
-- Active institutes
create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(),
name text not null,
-- presumably the institute's unique reference number — confirm source system
urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb,
website text,
metadata jsonb default '{}'::jsonb,
geo_coordinates jsonb default '{}'::jsonb,
neo4j_uuid_string text,
-- Independent sync states for the public and private graph copies
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
neo4j_private_sync_at timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
comment on column public.institutes.geo_coordinates is 'Geospatial coordinates from OSM search (latitude, longitude, boundingbox)';
--[ 6. neoFS Filesystem Tables ]--
-- File cabinets for organizing files
-- Each cabinet belongs to exactly one profile; cascades on profile delete.
create table if not exists public.file_cabinets (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.file_cabinets is 'User file cabinets for organizing documents and files';
-- Files stored in cabinets
create table if not exists public.files (
id uuid primary key default uuid_generate_v4(),
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
name text not null,
-- object path within the storage bucket below
path text not null,
bucket text default 'file-cabinets' not null,
created_at timestamp with time zone default timezone('utc'::text, now()),
mime_type text,
metadata jsonb default '{}'::jsonb,
size text,
-- Generated (stored) column: broad category derived from mime_type
category text generated always as (
case
when mime_type like 'image/%' then 'image'
when mime_type = 'application/pdf' then 'document'
when mime_type in ('application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document') then 'document'
when mime_type in ('application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') then 'spreadsheet'
when mime_type in ('application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation') then 'presentation'
when mime_type like 'audio/%' then 'audio'
when mime_type like 'video/%' then 'video'
else 'other'
end
) stored
);
comment on table public.files is 'Files stored in user cabinets with automatic categorization';
comment on column public.files.category is 'Automatically determined file category based on MIME type';
-- AI brains for processing files
create table if not exists public.brains (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
purpose text,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.brains is 'AI brains for processing and analyzing user files';
-- Brain-file associations
-- Pure join table; composite PK prevents duplicate links.
create table if not exists public.brain_files (
brain_id uuid not null references public.brains(id) on delete cascade,
file_id uuid not null references public.files(id) on delete cascade,
primary key (brain_id, file_id)
);
comment on table public.brain_files is 'Associations between AI brains and files for processing';
-- Document artefacts from file processing
-- One row per extracted artefact (page/chunk/etc.); completed_at is filled
-- by the set_completed_at trigger when status flips to 'completed'.
create table if not exists public.document_artefacts (
id uuid primary key default uuid_generate_v4(),
file_id uuid references public.files(id) on delete cascade,
page_number integer default 0 not null,
type text not null,
rel_path text not null,
size_tag text,
language text,
chunk_index integer,
extra jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
status text default 'completed' not null check (status in ('pending', 'processing', 'completed', 'failed')),
started_at timestamp with time zone default timezone('utc'::text, now()),
completed_at timestamp with time zone,
error_message text
);
comment on table public.document_artefacts is 'Extracted artefacts from document processing';
comment on column public.document_artefacts.status is 'Extraction status: pending, processing, completed, or failed';
comment on column public.document_artefacts.started_at is 'Timestamp when extraction process started';
comment on column public.document_artefacts.completed_at is 'Timestamp when extraction process completed (success or failure)';
comment on column public.document_artefacts.error_message is 'Error details if extraction failed';
-- Function execution logs
create table if not exists public.function_logs (
id serial primary key,
file_id uuid references public.files(id) on delete cascade,
timestamp timestamp with time zone default timezone('utc'::text, now()),
step text,
message text,
data jsonb
);
comment on table public.function_logs is 'Logs of function executions and processing steps';
--[ 7. Relationship Tables ]--
-- Institute memberships
-- A profile holds at most one role per institute (unique pair below).
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests
-- Pending join requests; approval workflow presumably promotes them into
-- institute_memberships — confirm against application code.
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 8. Audit Tables ]--
-- System audit logs
-- profile_id is kept (set null) even when the acting profile is deleted.
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 9. Exam Specifications ]--
-- One row per exam-board specification; spec_code is the natural key that
-- eb_exams rows reference.
create table if not exists public.eb_specifications (
id uuid primary key default uuid_generate_v4(),
spec_code text unique,
exam_board_code text,
award_code text,
subject_code text,
first_teach text,
spec_ver text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_specifications is 'Exam board specifications and their primary document';
comment on column public.eb_specifications.spec_code is 'Unique code for the specification, used for linking exams';
comment on column public.eb_specifications.doc_details is 'Tika extract of the specification document';
comment on column public.eb_specifications.docling_docs is 'Docling extracts settings and storage locations for the specification document';
--[ 10. Exam Papers / Entries ]--
-- Individual exam documents (question papers, mark schemes, reports…)
-- linked to their specification via spec_code.
create table if not exists public.eb_exams (
id uuid primary key default uuid_generate_v4(),
exam_code text unique,
spec_code text references public.eb_specifications(spec_code) on delete cascade,
paper_code text,
tier text,
session text,
type_code text,
-- Document storage details
storage_loc text,
doc_type text check (doc_type in ('pdf', 'json', 'md', 'html', 'txt', 'doctags')),
doc_details jsonb default '{}'::jsonb, -- e.g. Tika extract
docling_docs jsonb default '{}'::jsonb, -- e.g. Docling extracts settings and storage locations
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.eb_exams is 'Exam papers and related documents linked to specifications';
comment on column public.eb_exams.exam_code is 'Unique code for the exam paper, used for linking questions';
comment on column public.eb_exams.type_code is 'Type code for the exam document: Question Paper (QP), Mark Scheme (MS), Examiner Report (ER), Other (OT)';
comment on column public.eb_exams.doc_details is 'Tika extract of the exam paper document';
comment on column public.eb_exams.docling_docs is 'Docling extracts settings and storage locations for the exam paper document';
--[ 11. Indexes ]--
-- All creations are idempotent (if not exists) so the script can re-run.
-- Index for geospatial queries
create index if not exists idx_institutes_geo_coordinates on public.institutes using gin(geo_coordinates);
create index if not exists idx_institutes_urn on public.institutes(urn);
-- Document artefacts indexes
create index if not exists idx_document_artefacts_file_status on public.document_artefacts(file_id, status);
create index if not exists idx_document_artefacts_file_type on public.document_artefacts(file_id, type);
create index if not exists idx_document_artefacts_status on public.document_artefacts(status);
-- File indexes
create index if not exists idx_files_cabinet_id on public.files(cabinet_id);
create index if not exists idx_files_mime_type on public.files(mime_type);
create index if not exists idx_files_category on public.files(category);
-- Brain indexes
create index if not exists idx_brains_user_id on public.brains(user_id);
-- Exam board indexes
create index if not exists idx_eb_exams_exam_code on public.eb_exams(exam_code);
create index if not exists idx_eb_exams_spec_code on public.eb_exams(spec_code);
create index if not exists idx_eb_exams_paper_code on public.eb_exams(paper_code);
create index if not exists idx_eb_exams_tier on public.eb_exams(tier);
create index if not exists idx_eb_exams_session on public.eb_exams(session);
create index if not exists idx_eb_exams_type_code on public.eb_exams(type_code);
create index if not exists idx_eb_specifications_spec_code on public.eb_specifications(spec_code);
create index if not exists idx_eb_specifications_exam_board_code on public.eb_specifications(exam_board_code);
create index if not exists idx_eb_specifications_award_code on public.eb_specifications(award_code);
create index if not exists idx_eb_specifications_subject_code on public.eb_specifications(subject_code);
--[ 12. Triggers ]--
-- NOTE(review): these use plain `create trigger` (not drop-then-create), so
-- re-running the file errors on duplicates; later init files also add
-- similarly-purposed triggers under different names — consider consolidating.
-- Set completed_at when document artefact status changes to completed
create trigger trigger_set_completed_at
before update on public.document_artefacts
for each row
execute function public.set_completed_at();
-- Set updated_at on profile updates
create trigger trigger_profiles_updated_at
before update on public.profiles
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute updates
create trigger trigger_institutes_updated_at
before update on public.institutes
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_memberships updates
create trigger trigger_institute_memberships_updated_at
before update on public.institute_memberships
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_membership_requests updates
create trigger trigger_institute_membership_requests_updated_at
-- Fix: was attached to public.institute_memberships (copy/paste error) —
-- the trigger name and comment both target the requests table.
before update on public.institute_membership_requests
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_specifications updates
create trigger trigger_eb_specifications_updated_at
before update on public.eb_specifications
for each row
execute function public.handle_updated_at();
-- Set updated_at on eb_exams updates
create trigger trigger_eb_exams_updated_at
before update on public.eb_exams
for each row
execute function public.handle_updated_at();

View File

@ -0,0 +1,191 @@
--[ 8. Auth Functions ]--
-- Create a secure function to check admin status
-- Returns true when the calling user's profile has an admin-level user_type.
-- Fix: the original tested user_type = 'admin', but the profiles check
-- constraint only allows ('teacher','student','email_teacher','email_student',
-- 'developer','superadmin'), so the check could never be true. 'superadmin'
-- is now accepted; 'admin' is kept in case the constraint is ever widened.
create or replace function public.is_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type in ('admin', 'superadmin')),
false
);
$$ language sql security definer;
-- Create a secure function to check super admin status
-- Fix: the original tested user_type = 'admin' — identical to is_admin() and
-- not a value permitted by the profiles user_type check constraint. Super
-- admins are stored with user_type = 'superadmin'.
create or replace function public.is_super_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type = 'superadmin'),
false
);
$$ language sql security definer;
-- Create public wrapper functions
-- Note: These are now the main implementation functions, not wrappers
-- The original auth schema functions have been moved to public schema
-- Grant execute permissions
-- Allow logged-in users to query their own admin status (e.g. via PostgREST).
grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated;
-- Initial admin setup function
-- Promotes an existing profile (matched by email) to super admin and returns
-- the updated profile fields as JSON.
-- Raises when the caller is not service_role/superuser or the email is unknown.
create or replace function public.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
declare
result json;
begin
-- Only allow this to run as service role or superuser
if not (
current_user = 'service_role'
or exists (
select 1 from pg_roles
where rolname = current_user
and rolsuper
)
) then
raise exception 'Must be run as service_role or superuser';
end if;
-- Update user_type and username for admin
-- Fix: was user_type = 'admin', which violates the profiles user_type check
-- constraint ('teacher','student','email_teacher','email_student',
-- 'developer','superadmin') and made this update always fail.
update public.profiles
set user_type = 'superadmin',
username = coalesce(username, 'superadmin'),
display_name = coalesce(display_name, 'Super Admin')
where email = admin_email
returning json_build_object(
'id', id,
'email', email,
'user_type', user_type,
'username', username,
'display_name', display_name
) into result;
if result is null then
raise exception 'Admin user with email % not found', admin_email;
end if;
return result;
end;
$$;
-- Grant execute permissions
revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role;
-- Create RPC wrapper for REST API access
-- Thin pass-through so the call is reachable under the rpc schema while the
-- logic (and its service_role/superuser gate) stays in public.setup_initial_admin.
create or replace function rpc.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
begin
return public.setup_initial_admin(admin_email);
end;
$$;
-- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role;
--[ 9. Utility Functions ]--
-- Check if database is ready
-- Health probe: true only when the core schemas exist, auth.users exists,
-- and RLS is enabled on public.profiles.
create or replace function check_db_ready()
returns boolean
language plpgsql
security definer
as $$
begin
-- Check that ALL essential schemas exist.
-- Fix: the original `if not exists (... where schema_name in (...))` passed
-- as soon as ANY one of the three schemas existed.
if (
select count(distinct schema_name)
from information_schema.schemata
where schema_name in ('auth', 'storage', 'public')
) < 3 then
return false;
end if;
-- Check if essential tables exist
if not exists (
select 1
from information_schema.tables
where table_schema = 'auth'
and table_name = 'users'
) then
return false;
end if;
-- Check if RLS is enabled on public.profiles
if not exists (
select 1
from pg_tables
where schemaname = 'public'
and tablename = 'profiles'
and rowsecurity = true
) then
return false;
end if;
return true;
end;
$$;
-- Grant execute permission
grant execute on function check_db_ready to anon, authenticated, service_role;
-- Function to handle new user registration
-- Creates the matching public.profiles row for each new auth.users row,
-- taking user_type/username/display_name from raw_user_meta_data when
-- present and falling back to email-derived defaults.
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer set search_path = public
as $$
declare
default_user_type text := 'email_student';
default_username text;
begin
-- Generate username from email
default_username := split_part(new.email, '@', 1);
-- NOTE(review): profiles.username is unique, so a colliding derived
-- username makes this insert (and the signup) fail — confirm handling.
insert into public.profiles (
id,
email,
user_type,
username,
display_name
)
values (
new.id,
new.email,
coalesce(new.raw_user_meta_data->>'user_type', default_user_type),
coalesce(new.raw_user_meta_data->>'username', default_username),
coalesce(new.raw_user_meta_data->>'display_name', default_username)
);
return new;
end;
$$;
-- Trigger for new user creation
drop trigger if exists on_auth_user_created on auth.users;
create trigger on_auth_user_created
after insert on auth.users
for each row execute procedure public.handle_new_user();
--[ 11. Database Triggers ]--
-- Idempotent (drop-then-create) updated_at triggers for profile/membership
-- tables. NOTE(review): the core schema file also attaches handle_updated_at
-- to profiles and institute_memberships under trigger_* names, so those
-- tables end up with two equivalent triggers — harmless but redundant.
drop trigger if exists handle_profiles_updated_at on public.profiles;
create trigger handle_profiles_updated_at
before update on public.profiles
for each row execute function public.handle_updated_at();
drop trigger if exists handle_institute_memberships_updated_at on public.institute_memberships;
create trigger handle_institute_memberships_updated_at
before update on public.institute_memberships
for each row execute function public.handle_updated_at();
drop trigger if exists handle_membership_requests_updated_at on public.institute_membership_requests;
create trigger handle_membership_requests_updated_at
before update on public.institute_membership_requests
for each row execute function public.handle_updated_at();

View File

@ -0,0 +1,20 @@
-- Storage policies configuration for Supabase
-- Note: Storage bucket policies are managed by Supabase internally
-- This file provides guidance on what should be configured
-- Storage bucket policies should be configured through:
-- 1. Supabase Dashboard > Storage > Policies
-- 2. Or via SQL with proper permissions (requires service_role or owner access)
-- Recommended policies for storage.buckets:
-- - Super admin has full access to buckets
-- - Users can create their own buckets
-- - Users can view their own buckets or public buckets
-- Recommended policies for storage.objects:
-- - Users can upload to buckets they own
-- - Users can view objects in public buckets
-- - Users can manage objects in buckets they own
-- Note: These policies require the service_role or appropriate permissions
-- to be applied to the storage schema tables

View File

@ -0,0 +1,20 @@
-- Initial admin setup for ClassroomCopilot
-- This file handles basic database setup and permissions
-- Ensure uuid-ossp extension is enabled
create extension if not exists "uuid-ossp" schema extensions;
-- Grant basic permissions to authenticated users for public schema
-- Note: These permissions are granted to allow users to work with the application
-- NOTE(review): these blanket grants are broad — row access is only limited
-- on tables where RLS is explicitly enabled; tables without RLS are fully
-- readable/writable by any authenticated user. Confirm this is intended.
grant usage on schema public to authenticated;
grant all on all tables in schema public to authenticated;
grant all on all sequences in schema public to authenticated;
grant all on all functions in schema public to authenticated;
-- Set default privileges for future objects
alter default privileges in schema public grant all on tables to authenticated;
alter default privileges in schema public grant all on sequences to authenticated;
alter default privileges in schema public grant all on functions to authenticated;
-- Note: The setup_initial_admin function is defined in 62-functions-triggers.sql
-- and should be called with an admin email parameter when needed

View File

@ -0,0 +1,95 @@
-- Files table augments and storage GC hooks
-- 1) Add columns to files if missing
-- Idempotent column additions so this script can run against databases
-- created before these fields existed.
do $$
begin
if not exists (
select 1 from information_schema.columns
where table_schema='public' and table_name='files' and column_name='uploaded_by'
) then
-- who uploaded the file (nullable for pre-existing rows)
alter table public.files add column uploaded_by uuid references public.profiles(id);
end if;
if not exists (
select 1 from information_schema.columns
where table_schema='public' and table_name='files' and column_name='size_bytes'
) then
alter table public.files add column size_bytes bigint;
end if;
if not exists (
select 1 from information_schema.columns
where table_schema='public' and table_name='files' and column_name='source'
) then
-- origin of the upload; defaults to the web uploader
alter table public.files add column source text default 'uploader-web';
end if;
end $$;
-- 2) Unique index for cabinet/path combo
-- A given path may appear at most once per cabinet.
create unique index if not exists uq_files_cabinet_path on public.files(cabinet_id, path);
-- 3) Storage GC helpers (ported from neoFS with storage schema)
-- Deletes a stored object and everything nested beneath it (path treated as
-- a folder prefix) from Supabase storage. No-op when either argument is NULL.
create or replace function public._delete_storage_objects(p_bucket text, p_path text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
    if p_bucket is not null and p_path is not null then
        -- exact object plus any objects under "<path>/..."
        delete from storage.objects
        where bucket_id = p_bucket
          and (name = p_path or name like p_path || '/%');
    end if;
end
$$;
-- Trigger function: garbage-collect storage objects when a files row is
-- deleted or its bucket/path changes. Fires AFTER the row change, so
-- returning NULL is correct (the return value is ignored).
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
if tg_op = 'DELETE' then
perform public._delete_storage_objects(old.bucket, old.path);
elsif tg_op = 'UPDATE' then
-- only GC when the object actually moved
if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
perform public._delete_storage_objects(old.bucket, old.path);
end if;
end if;
return null;
end
$$;
-- 4) Attach GC trigger to files bucket/path changes
drop trigger if exists trg_files_gc on public.files;
create trigger trg_files_gc
after delete or update of bucket, path on public.files
for each row execute function public._storage_gc_sql();
-- 5) Document artefacts GC: remove artefact objects from storage when rows change/delete
-- Looks up the owning file's bucket and removes the artefact's stored objects
-- at old.rel_path (and below) whenever the row is deleted or re-pathed.
create or replace function public._artefact_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
v_bucket text;
begin
if tg_op = 'DELETE' then
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
return old;
elsif tg_op = 'UPDATE' then
-- only GC when the artefact moved (path or owning file changed)
if (old.rel_path is distinct from new.rel_path) or (old.file_id is distinct from new.file_id) then
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
end if;
return new;
end if;
-- NOTE(review): no RETURN outside DELETE/UPDATE — safe only because the
-- trigger below fires solely on those events.
end
$$;
drop trigger if exists trg_document_artefacts_gc on public.document_artefacts;
create trigger trg_document_artefacts_gc
before delete or update of file_id, rel_path on public.document_artefacts
for each row execute function public._artefact_gc_sql();

View File

@ -0,0 +1,84 @@
-- Enable RLS and define policies for filesystem tables
-- Ownership model: a cabinet belongs to file_cabinets.user_id; file access is
-- derived from owning the containing cabinet.
-- 1) Enable RLS
alter table if exists public.file_cabinets enable row level security;
alter table if exists public.files enable row level security;
alter table if exists public.brain_files enable row level security;
alter table if exists public.document_artefacts enable row level security;
-- Cabinet policy covers ALL commands (no FOR clause): owner-only access.
drop policy if exists "User can access own cabinets" on public.file_cabinets;
create policy "User can access own cabinets" on public.file_cabinets
using (user_id = auth.uid())
with check (user_id = auth.uid());
-- Files: blanket owner policy plus explicit per-command policies below
-- scoped to the authenticated role.
drop policy if exists "User can access files in own cabinet" on public.files;
create policy "User can access files in own cabinet" on public.files
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can insert files into own cabinet" on public.files;
create policy "User can insert files into own cabinet" on public.files for insert to authenticated
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can update files in own cabinet" on public.files;
create policy "User can update files in own cabinet" on public.files for update to authenticated
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can delete files from own cabinet" on public.files;
create policy "User can delete files from own cabinet" on public.files for delete
using (exists (
select 1 from public.file_cabinets c
where c.id = files.cabinet_id and c.user_id = auth.uid()
));
-- 4) Brain-files: allow linking owned files to owned brains
-- Read/update/delete require owning BOTH the brain and the file's cabinet.
drop policy if exists "User can link files they own to their brains" on public.brain_files;
create policy "User can link files they own to their brains" on public.brain_files
using (
exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
and exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = brain_files.file_id and c.user_id = auth.uid()
)
)
-- Fix: was WITH CHECK (true), which let any role INSERT arbitrary
-- brain/file links regardless of ownership. Inserts must satisfy the same
-- ownership predicate as the USING clause.
with check (
exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
and exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = brain_files.file_id and c.user_id = auth.uid()
)
);
-- 5) Document artefacts: allow reads to owners via file cabinet, writes via service_role
-- Owners are resolved through the artefact's file -> cabinet -> user chain.
drop policy if exists "artefacts_read_by_owner" on public.document_artefacts;
create policy "artefacts_read_by_owner" on public.document_artefacts for select to authenticated
using (exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));
-- Ingestion/maintenance runs as service_role with unrestricted access.
drop policy if exists "artefacts_rw_service" on public.document_artefacts;
create policy "artefacts_rw_service" on public.document_artefacts to service_role
using (true) with check (true);
-- Allow owners to delete their artefacts (needed for cascades under RLS)
drop policy if exists "artefacts_delete_by_owner" on public.document_artefacts;
create policy "artefacts_delete_by_owner" on public.document_artefacts for delete to authenticated
using (exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));
-- File vectors RLS and policies are defined in 67-vectors.sql after the table is created

79
volumes/db/67-vectors.sql Normal file
View File

@ -0,0 +1,79 @@
-- Vectors: file_vectors table and similarity search function
-- 1) Ensure pgvector extension is available
create extension if not exists vector;
-- 2) File vectors table
-- Embeddings are stored without a fixed dimension; file/cabinet linkage lives
-- inside the metadata jsonb (keys like file_id/cabinet_id) rather than as FKs.
create table if not exists public.file_vectors (
id bigint generated by default as identity primary key,
created_at timestamp with time zone default now() not null,
embedding public.vector,
metadata jsonb,
content text
);
-- 3) ANN index (skipped until embedding dimension is fixed)
-- To enable: set column type to public.vector(<dim>) and uncomment:
-- create index if not exists file_vectors_embedding_idx
-- on public.file_vectors using ivfflat (embedding public.vector_cosine_ops)
-- with (lists='100');
-- 3b) Enable RLS and set policies (moved here to avoid ordering issues)
alter table if exists public.file_vectors enable row level security;
-- Owners may read vectors for files in their own cabinets. Vectors whose
-- metadata carries no file_id remain readable by any authenticated user
-- (unchanged behavior — confirm this is intended).
drop policy if exists "vectors_read_by_owner" on public.file_vectors;
create policy "vectors_read_by_owner" on public.file_vectors for select to authenticated
-- Fix: was coalesce((metadata->>'file_id')::uuid, null) — coalesce with a
-- NULL fallback is a no-op; the plain cast expresses the same null test.
using ((metadata->>'file_id')::uuid is null or exists (
select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
where f.id = (metadata->>'file_id')::uuid and c.user_id = auth.uid()
));
-- Ingestion/maintenance runs as service_role with unrestricted access.
drop policy if exists "vectors_rw_service" on public.file_vectors;
create policy "vectors_rw_service" on public.file_vectors to service_role
using (true) with check (true);
-- 4) Match function mirrored from neoFS (generic metadata mapping)
-- Nearest-neighbour search over file_vectors by cosine distance (<=>).
-- `filter` is a jsonb of optional keys; each predicate below applies only
-- when its key is present. Returns at most max(match_count, 1) rows
-- (match_count NULL defaults to 10), nearest first.
create or replace function public.match_file_vectors(
filter jsonb,
match_count integer,
query_embedding public.vector
)
returns table (
id bigint,
file_id uuid,
cabinet_id uuid,
artefact_type text,
artefact_is text,
original_path_prefix text,
original_filename text,
content text,
metadata jsonb,
similarity double precision
)
language sql stable as $$
select
fv.id,
-- metadata values are surfaced as typed columns; empty strings become NULL
nullif(fv.metadata->>'file_id','')::uuid as file_id,
nullif(fv.metadata->>'cabinet_id','')::uuid as cabinet_id,
nullif(fv.metadata->>'artefact_type','') as artefact_type,
nullif(fv.metadata->>'artefact_is','') as artefact_is,
nullif(fv.metadata->>'original_path_prefix','') as original_path_prefix,
nullif(fv.metadata->>'original_filename','') as original_filename,
fv.content,
fv.metadata,
-- cosine similarity = 1 - cosine distance
1 - (fv.embedding <=> query_embedding) as similarity
from public.file_vectors fv
where
-- `filter ? key` tests key presence; absent keys disable their predicate
(coalesce(filter ? 'file_id', false) = false or (fv.metadata->>'file_id')::uuid = (filter->>'file_id')::uuid)
and (coalesce(filter ? 'cabinet_id', false) = false or (fv.metadata->>'cabinet_id')::uuid = (filter->>'cabinet_id')::uuid)
and (coalesce(filter ? 'artefact_type', false) = false or (fv.metadata->>'artefact_type') = (filter->>'artefact_type'))
and (coalesce(filter ? 'artefact_id', false) = false or (fv.metadata->>'artefact_id') = (filter->>'artefact_id'))
and (coalesce(filter ? 'original_path_prefix', false) = false or (fv.metadata->>'original_path_prefix') like (filter->>'original_path_prefix') || '%')
and (coalesce(filter ? 'original_path_prefix_ilike', false)= false or (fv.metadata->>'original_path_prefix') ilike (filter->>'original_path_prefix_ilike') || '%')
and (coalesce(filter ? 'original_filename', false) = false or (fv.metadata->>'original_filename') = (filter->>'original_filename'))
and (coalesce(filter ? 'original_filename_ilike', false)= false or (fv.metadata->>'original_filename') ilike (filter->>'original_filename_ilike'))
order by fv.embedding <=> query_embedding
limit greatest(coalesce(match_count, 10), 1)
$$;

View File

@ -0,0 +1,73 @@
-- Cabinet memberships for sharing access
-- Grants per-profile access to a file cabinet with one of three roles.
create table if not exists public.cabinet_memberships (
  id uuid default uuid_generate_v4() primary key,
  cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
  profile_id uuid not null references public.profiles(id) on delete cascade,
  role text not null check (role in ('owner','editor','viewer')),
  created_at timestamp with time zone default timezone('utc'::text, now()),
  updated_at timestamp with time zone default timezone('utc'::text, now()),
  -- one membership row per (cabinet, profile) pair
  unique(cabinet_id, profile_id)
);
create index if not exists idx_cabinet_memberships_cabinet on public.cabinet_memberships(cabinet_id);
create index if not exists idx_cabinet_memberships_profile on public.cabinet_memberships(profile_id);
-- Updated at trigger
-- Keeps updated_at current on every row update via the shared handle_updated_at() helper.
drop trigger if exists trg_cabinet_memberships_updated_at on public.cabinet_memberships;
create trigger trg_cabinet_memberships_updated_at
  before update on public.cabinet_memberships
  for each row execute function public.handle_updated_at();
-- RLS and policies
alter table if exists public.cabinet_memberships enable row level security;
-- Members can select their own memberships; cabinet owners can also see memberships
drop policy if exists cm_read_self_or_owner on public.cabinet_memberships;
create policy cm_read_self_or_owner on public.cabinet_memberships for select to authenticated
  using (
    profile_id = auth.uid() or exists (
      select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
    )
  );
-- Cabinet owners can insert memberships
drop policy if exists cm_insert_by_owner on public.cabinet_memberships;
create policy cm_insert_by_owner on public.cabinet_memberships for insert to authenticated
  with check (exists (
    select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
  ));
-- Cabinet owners can update memberships (e.g., role)
drop policy if exists cm_update_by_owner on public.cabinet_memberships;
create policy cm_update_by_owner on public.cabinet_memberships for update to authenticated
  using (exists (
    select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
  ))
  with check (exists (
    select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
  ));
-- Cabinet owners can delete memberships
drop policy if exists cm_delete_by_owner on public.cabinet_memberships;
create policy cm_delete_by_owner on public.cabinet_memberships for delete to authenticated
  using (exists (
    select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
  ));
-- Extend access to cabinets/files for members (after table exists)
-- NOTE(review): these policies and cm_read_self_or_owner reference each other's tables.
-- Policy subqueries execute as the table owner (who bypasses RLS unless FORCE ROW LEVEL
-- SECURITY is set), so this is not recursive today — confirm FORCE RLS is never enabled
-- on file_cabinets/cabinet_memberships.
drop policy if exists "User can access cabinets via membership" on public.file_cabinets;
create policy "User can access cabinets via membership" on public.file_cabinets for select to authenticated
  using (exists (
    select 1 from public.cabinet_memberships m
    where m.cabinet_id = file_cabinets.id and m.profile_id = auth.uid()
  ));
drop policy if exists "User can access files via membership" on public.files;
create policy "User can access files via membership" on public.files for select to authenticated
  using (exists (
    select 1 from public.cabinet_memberships m
    where m.cabinet_id = files.cabinet_id and m.profile_id = auth.uid()
  ));

View File

@ -0,0 +1,48 @@
-- Ensure storage objects for all artefacts are removed when a file is deleted
-- by deleting the entire "cabinet_id/file_id" directory prefix in Storage.
-- Helper to delete all objects under a prefix
-- Remove every Storage object stored under p_prefix within bucket p_bucket:
-- all children ("<prefix>/...") plus an object sitting exactly at the prefix,
-- should one exist. Silently does nothing when either argument is null.
-- security definer + storage in search_path: runs with owner rights so it can
-- touch storage.objects regardless of the caller's RLS.
create or replace function public._delete_storage_prefix(p_bucket text, p_prefix text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
  if p_bucket is not null and p_prefix is not null then
    -- Single pass covering both the directory contents and the bare prefix name.
    delete from storage.objects
     where bucket_id = p_bucket
       and (name like p_prefix || '/%' or name = p_prefix);
  end if;
end
$$;
-- Update file-level GC to also delete the parent directory prefix (cabinet_id/file_id)
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
v_prefix text;
begin
-- Derive directory prefix from the file path by removing the last segment (filename)
-- Example: 'cabinet_id/file_id/filename.ext' -> 'cabinet_id/file_id'
v_prefix := regexp_replace(old.path, '/[^/]+$', '');
if tg_op = 'DELETE' then
-- Delete the original object and any artefacts under the file's directory
perform public._delete_storage_objects(old.bucket, old.path);
perform public._delete_storage_prefix(old.bucket, v_prefix);
elsif tg_op = 'UPDATE' then
if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
perform public._delete_storage_objects(old.bucket, old.path);
perform public._delete_storage_prefix(old.bucket, v_prefix);
end if;
end if;
return null;
end
$$;

View File

@ -0,0 +1,41 @@
-- Add directory support to files table
-- Migration: Add directory support for folder uploads
-- Add new columns to files table
-- All columns use IF NOT EXISTS so the migration is idempotent; the CHECK on
-- processing_status constrains new writes only (existing rows are not revalidated
-- by ADD COLUMN with a default).
ALTER TABLE files
ADD COLUMN IF NOT EXISTS is_directory BOOLEAN DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS parent_directory_id UUID REFERENCES files(id) ON DELETE CASCADE,
ADD COLUMN IF NOT EXISTS relative_path TEXT,
ADD COLUMN IF NOT EXISTS directory_manifest JSONB,
ADD COLUMN IF NOT EXISTS upload_session_id UUID,
ADD COLUMN IF NOT EXISTS processing_status TEXT DEFAULT 'uploaded' CHECK (processing_status IN ('uploaded', 'processing', 'completed', 'failed', 'queued'));
-- Create index for efficient directory queries
CREATE INDEX IF NOT EXISTS idx_files_parent_directory ON files(parent_directory_id);
CREATE INDEX IF NOT EXISTS idx_files_upload_session ON files(upload_session_id);
CREATE INDEX IF NOT EXISTS idx_files_processing_status ON files(processing_status);
CREATE INDEX IF NOT EXISTS idx_files_is_directory ON files(is_directory);
-- Create directory manifest structure
-- Column-level documentation surfaced in psql \d+ and most DB tooling.
COMMENT ON COLUMN files.is_directory IS 'True if this record represents a directory/folder';
COMMENT ON COLUMN files.parent_directory_id IS 'ID of parent directory if this file is inside an uploaded folder';
COMMENT ON COLUMN files.relative_path IS 'Relative path within the uploaded directory structure';
COMMENT ON COLUMN files.directory_manifest IS 'JSON manifest of directory contents including file count, total size, structure';
COMMENT ON COLUMN files.upload_session_id IS 'Groups files uploaded together in a single directory upload session';
COMMENT ON COLUMN files.processing_status IS 'Simple status tracking without auto-processing';
-- Example directory_manifest structure:
-- {
--   "total_files": 15,
--   "total_size_bytes": 12345678,
--   "directory_structure": {
--     "documents/": {
--       "file1.pdf": {"size": 123456, "mime_type": "application/pdf"},
--       "subdirectory/": {
--         "file2.docx": {"size": 234567, "mime_type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document"}
--       }
--     }
--   },
--   "upload_timestamp": "2024-09-23T12:00:00Z",
--   "upload_method": "directory_picker"
-- }

View File

@ -0,0 +1,16 @@
# Supabase Edge Functions
This document describes the available Edge Functions in this self-hosted Supabase instance.
## institute-geocoder
Institute address geocoding using SearXNG/OpenStreetMap
**Endpoints:**
- `/functions/v1/institute-geocoder`
- `/functions/v1/institute-geocoder/batch`
**Usage:** Send a POST request with `institute_id` and optional address fields in the JSON body
**Dependencies:** SearXNG service, OpenStreetMap data

View File

@ -0,0 +1,16 @@
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.
import { serve } from "https://deno.land/std@0.177.1/http/server.ts"

// Minimal demo handler: every request receives the JSON string
// "Hello from Edge Functions!" back.
serve(async () => {
  const payload = `"Hello from Edge Functions!"`
  const headers = { "Content-Type": "application/json" }
  return new Response(payload, { headers })
})
// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
// --header 'Authorization: Bearer <anon/service_role API key>'

View File

@ -0,0 +1,391 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response (and returned bare on OPTIONS preflight).
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
// Request body for the batch endpoint; all fields optional.
interface BatchGeocodingRequest {
  limit?: number            // max institutes to process per call (handler defaults to 10)
  force_refresh?: boolean   // when true, re-geocode even institutes that already have coordinates
  institute_ids?: string[]  // restrict the batch to these specific institutes
}
// Per-institute outcome reported in the batch response.
interface GeocodingResult {
  institute_id: string
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]  // bounding box as returned by SearXNG/OSM (strings)
    geojson?: any          // optional OSM geometry passthrough
    osm?: any              // optional raw OSM metadata passthrough
  }
  error?: string
}
// POST /functions/v1/institute-geocoder/batch
// Batch-geocodes institutes that have an address but no geo_coordinates yet:
// fetches candidates from public.institutes, geocodes each via SearXNG (!osm)
// using a multi-strategy fallback, writes hits back to institutes.geo_coordinates,
// and records audit rows in public.function_logs.
serve(async (req: Request) => {
  // Handle CORS preflight requests
  if (req.method === 'OPTIONS') {
    return new Response('ok', { headers: corsHeaders })
  }
  try {
    // Get environment variables
    const supabaseUrl = Deno.env.get('SUPABASE_URL')
    const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
    const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
    if (!supabaseUrl || !supabaseServiceKey) {
      throw new Error('Missing required environment variables')
    }
    // Create Supabase client
    // Service-role key bypasses RLS; this function must only run server-side.
    const supabase = createClient(supabaseUrl, supabaseServiceKey)
    // Parse request body
    const body: BatchGeocodingRequest = await req.json()
    const limit = body.limit || 10
    const forceRefresh = body.force_refresh || false
    // Get institutes that need geocoding
    let query = supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .not('import_id', 'is', null)
    if (!forceRefresh) {
      // Only get institutes without coordinates or with empty coordinates
      query = query.or('geo_coordinates.is.null,geo_coordinates.eq.{}')
    }
    if (body.institute_ids && body.institute_ids.length > 0) {
      query = query.in('id', body.institute_ids)
    }
    const { data: institutes, error: fetchError } = await query.limit(limit)
    if (fetchError) {
      throw new Error(`Failed to fetch institutes: ${fetchError.message}`)
    }
    if (!institutes || institutes.length === 0) {
      return new Response(
        JSON.stringify({
          success: true,
          message: 'No institutes found that need geocoding',
          processed: 0
        }),
        {
          status: 200,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    console.log(`Processing ${institutes.length} institutes for geocoding`)
    const results: GeocodingResult[] = []
    let successCount = 0
    let errorCount = 0
    // Process institutes sequentially to avoid overwhelming the SearXNG service
    let processedCount = 0
    for (const institute of institutes) {
      try {
        const address = institute.address as any
        if (!address) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No address information available',
            error: 'Missing address data'
          })
          errorCount++
          processedCount++
          continue
        }
        // Build search query from address components
        // assumes address is a jsonb object with street/town/county/postcode/country — TODO confirm schema
        const addressParts = [
          address.street,
          address.town,
          address.county,
          address.postcode,
          address.country
        ].filter(Boolean)
        if (addressParts.length === 0) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No valid address components found',
            error: 'Empty address parts'
          })
          errorCount++
          processedCount++
          continue
        }
        const searchQuery = addressParts.join(', ')
        console.log(`Geocoding institute ${institute.id}: ${searchQuery}`)
        // Query SearXNG for geocoding with fallback strategy
        const geocodingResult = await geocodeAddressWithFallback(address, searxngUrl)
        if (geocodingResult.success && geocodingResult.coordinates) {
          // Update institute with geospatial coordinates
          // NOTE(review): search_query stores the full component join, which may
          // differ from the fallback query that actually produced the hit — confirm intent.
          const { error: updateError } = await supabase
            .from('institutes')
            .update({
              geo_coordinates: {
                latitude: geocodingResult.coordinates.latitude,
                longitude: geocodingResult.coordinates.longitude,
                boundingbox: geocodingResult.coordinates.boundingbox,
                geojson: geocodingResult.coordinates.geojson,
                osm: geocodingResult.coordinates.osm,
                search_query: searchQuery,
                geocoded_at: new Date().toISOString()
              }
            })
            .eq('id', institute.id)
          if (updateError) {
            throw new Error(`Failed to update institute: ${updateError.message}`)
          }
          results.push({
            institute_id: institute.id,
            success: true,
            message: 'Successfully geocoded',
            coordinates: geocodingResult.coordinates
          })
          successCount++
          // Log the successful geocoding
          await supabase
            .from('function_logs')
            .insert({
              file_id: null,
              step: 'batch_geocoding',
              message: 'Successfully geocoded institute address in batch',
              data: {
                institute_id: institute.id,
                search_query: searchQuery,
                coordinates: geocodingResult.coordinates
              }
            })
        } else {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'Geocoding failed',
            error: geocodingResult.error || 'Unknown error'
          })
          errorCount++
        }
        processedCount++
        // Add a small delay between requests to be respectful to the SearXNG service
        // Optimize delay based on batch size for better performance
        if (processedCount < institutes.length) { // Don't delay after the last institute
          const delay = institutes.length > 200 ? 50 : 100; // Faster processing for large batches
          await new Promise(resolve => setTimeout(resolve, delay))
        }
      } catch (error) {
        console.error(`Error processing institute ${institute.id}:`, error)
        // NOTE(review): with strict TS, catch variables are `unknown`; error.message
        // may need an `instanceof Error` narrowing — confirm the deploy's compiler options.
        results.push({
          institute_id: institute.id,
          success: false,
          message: 'Processing error',
          error: error.message
        })
        errorCount++
      }
    }
    // Log the batch operation
    await supabase
      .from('function_logs')
      .insert({
        file_id: null,
        step: 'batch_geocoding_complete',
        message: 'Batch geocoding operation completed',
        data: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount,
          results: results
        }
      })
    return new Response(
      JSON.stringify({
        success: true,
        message: 'Batch geocoding completed',
        summary: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount
        },
        results: results
      }),
      {
        status: 200,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  } catch (error) {
    console.error('Error in batch institute geocoder:', error)
    return new Response(
      JSON.stringify({
        error: 'Internal server error',
        details: error.message
      }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  }
})
// Geocode a free-text address via SearXNG's !osm bang and return the first hit.
//
// searchQuery: human-readable address string (components joined with ', ').
// searxngUrl: base URL of the SearXNG instance (no trailing slash).
// Resolves to { success: true, coordinates } on a usable hit, otherwise
// { success: false, error } — never rejects.
//
// Fixes over the previous version:
// - Valid coordinates of 0 (equator / prime meridian) were rejected by the old
//   truthiness check; non-numeric strings slipped through and became NaN.
//   Now parse first and validate with Number.isFinite.
// - Caught errors are narrowed before reading .message (catch vars are `unknown`
//   under strict TypeScript).
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  try {
    // Format search query for OSM
    const osmQuery = `!osm ${searchQuery}`
    const searchUrl = `${searxngUrl}/search?q=${encodeURIComponent(osmQuery)}&format=json`
    const response = await fetch(searchUrl, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
        'User-Agent': 'ClassroomCopilot-BatchGeocoder/1.0'
      }
    })
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const data = await response.json()
    // Check if we have results - the number_of_results field might be unreliable
    // so we check the results array directly
    if (!data.results || data.results.length === 0) {
      return {
        success: false,
        error: 'No results returned from SearXNG'
      }
    }
    const result = data.results[0]
    // Parse before validating so that legitimate 0 coordinates pass and
    // missing/empty/garbage values (NaN after parsing) are rejected.
    const latitude = Number.parseFloat(result.latitude)
    const longitude = Number.parseFloat(result.longitude)
    if (!Number.isFinite(latitude) || !Number.isFinite(longitude)) {
      return {
        success: false,
        error: 'Missing latitude or longitude in SearXNG response'
      }
    }
    return {
      success: true,
      coordinates: {
        latitude,
        longitude,
        boundingbox: result.boundingbox || [],
        geojson: result.geojson,
        osm: result.osm
      }
    }
  } catch (error) {
    console.error('Geocoding error:', error)
    return {
      success: false,
      error: error instanceof Error ? error.message : String(error)
    }
  }
}
// Geocode an address object by trying progressively less specific queries until
// one returns coordinates: full address -> town/county/postcode -> postcode only
// -> town + postcode. Returns the first successful geocodeAddress result, or a
// failure result when every strategy misses.
async function geocodeAddressWithFallback(address: any, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  // Ordered strategy table, most specific first; a strategy is attempted only
  // when every address component it needs is present.
  const strategies = [
    {
      applicable: Boolean(address.street && address.town && address.county && address.postcode),
      query: `${address.street}, ${address.town}, ${address.county}, ${address.postcode}`,
      attemptLabel: 'Trying full address',
      successLabel: 'Full address geocoding successful'
    },
    {
      applicable: Boolean(address.town && address.county && address.postcode),
      query: `${address.town}, ${address.county}, ${address.postcode}`,
      attemptLabel: 'Trying medium address',
      successLabel: 'Medium address geocoding successful'
    },
    {
      applicable: Boolean(address.postcode),
      query: `${address.postcode}`,
      attemptLabel: 'Trying postcode only',
      successLabel: 'Postcode geocoding successful'
    },
    {
      applicable: Boolean(address.town && address.postcode),
      query: `${address.town}, ${address.postcode}`,
      attemptLabel: 'Trying simple address',
      successLabel: 'Simple address geocoding successful'
    }
  ]
  for (const strategy of strategies) {
    if (!strategy.applicable) continue
    console.log(`${strategy.attemptLabel}: ${strategy.query}`)
    const result = await geocodeAddress(strategy.query, searxngUrl)
    if (result.success && result.coordinates) {
      console.log(strategy.successLabel)
      return result
    }
  }
  // All strategies failed
  return {
    success: false,
    error: 'No coordinates found with any address combination'
  }
}

View File

@ -0,0 +1,317 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response (and returned bare on OPTIONS preflight).
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
// Request body for the batch endpoint; all fields optional.
interface BatchGeocodingRequest {
  limit?: number            // max institutes to process per call (handler defaults to 10)
  force_refresh?: boolean   // when true, re-geocode even institutes that already have coordinates
  institute_ids?: string[]  // restrict the batch to these specific institutes
}
// Per-institute outcome reported in the batch response.
interface GeocodingResult {
  institute_id: string
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]  // bounding box as returned by SearXNG/OSM (strings)
    geojson?: any          // optional OSM geometry passthrough
    osm?: any              // optional raw OSM metadata passthrough
  }
  error?: string
}
// POST batch geocoder (single-query variant): geocodes institutes lacking
// geo_coordinates with one combined-address SearXNG query per institute,
// persisting hits to public.institutes and audit rows to public.function_logs.
//
// Fixes:
// - Reads SUPABASE_SERVICE_ROLE_KEY (was misspelled SUPABASE_SERVICE_RATE_KEY,
//   which made every invocation fail with "Missing required environment
//   variables"; the sibling fallback variant uses the correct name).
// - Caught errors are narrowed before reading .message (catch variables are
//   `unknown` under strict TypeScript).
serve(async (req: Request) => {
  // Handle CORS preflight requests
  if (req.method === 'OPTIONS') {
    return new Response('ok', { headers: corsHeaders })
  }
  try {
    // Required configuration; the service-role key bypasses RLS, so this
    // function must only ever run server-side.
    const supabaseUrl = Deno.env.get('SUPABASE_URL')
    const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
    const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
    if (!supabaseUrl || !supabaseServiceKey) {
      throw new Error('Missing required environment variables')
    }
    // Create Supabase client
    const supabase = createClient(supabaseUrl, supabaseServiceKey)
    // Parse request body
    const body: BatchGeocodingRequest = await req.json()
    const limit = body.limit || 10
    const forceRefresh = body.force_refresh || false
    // Get institutes that need geocoding
    let query = supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .not('import_id', 'is', null)
    if (!forceRefresh) {
      // Only get institutes without coordinates or with empty coordinates
      query = query.or('geo_coordinates.is.null,geo_coordinates.eq.{}')
    }
    if (body.institute_ids && body.institute_ids.length > 0) {
      query = query.in('id', body.institute_ids)
    }
    const { data: institutes, error: fetchError } = await query.limit(limit)
    if (fetchError) {
      throw new Error(`Failed to fetch institutes: ${fetchError.message}`)
    }
    if (!institutes || institutes.length === 0) {
      return new Response(
        JSON.stringify({
          success: true,
          message: 'No institutes found that need geocoding',
          processed: 0
        }),
        {
          status: 200,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    console.log(`Processing ${institutes.length} institutes for geocoding`)
    const results: GeocodingResult[] = []
    let successCount = 0
    let errorCount = 0
    // Process institutes sequentially to avoid overwhelming the SearXNG service
    for (const institute of institutes) {
      try {
        const address = institute.address as any
        if (!address) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No address information available',
            error: 'Missing address data'
          })
          errorCount++
          continue
        }
        // Build search query from address components
        const addressParts = [
          address.street,
          address.town,
          address.county,
          address.postcode,
          address.country
        ].filter(Boolean)
        if (addressParts.length === 0) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No valid address components found',
            error: 'Empty address parts'
          })
          errorCount++
          continue
        }
        const searchQuery = addressParts.join(', ')
        console.log(`Geocoding institute ${institute.id}: ${searchQuery}`)
        // Query SearXNG for geocoding
        const geocodingResult = await geocodeAddress(searchQuery, searxngUrl)
        if (geocodingResult.success && geocodingResult.coordinates) {
          // Update institute with geospatial coordinates
          const { error: updateError } = await supabase
            .from('institutes')
            .update({
              geo_coordinates: {
                latitude: geocodingResult.coordinates.latitude,
                longitude: geocodingResult.coordinates.longitude,
                boundingbox: geocodingResult.coordinates.boundingbox,
                geojson: geocodingResult.coordinates.geojson,
                osm: geocodingResult.coordinates.osm,
                search_query: searchQuery,
                geocoded_at: new Date().toISOString()
              }
            })
            .eq('id', institute.id)
          if (updateError) {
            throw new Error(`Failed to update institute: ${updateError.message}`)
          }
          results.push({
            institute_id: institute.id,
            success: true,
            message: 'Successfully geocoded',
            coordinates: geocodingResult.coordinates
          })
          successCount++
          // Log the successful geocoding
          await supabase
            .from('function_logs')
            .insert({
              file_id: null,
              step: 'batch_geocoding',
              message: 'Successfully geocoded institute address in batch',
              data: {
                institute_id: institute.id,
                search_query: searchQuery,
                coordinates: geocodingResult.coordinates
              }
            })
        } else {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'Geocoding failed',
            error: geocodingResult.error || 'Unknown error'
          })
          errorCount++
        }
        // Add a small delay between requests to be respectful to the SearXNG service
        await new Promise(resolve => setTimeout(resolve, 100))
      } catch (error) {
        console.error(`Error processing institute ${institute.id}:`, error)
        results.push({
          institute_id: institute.id,
          success: false,
          message: 'Processing error',
          error: error instanceof Error ? error.message : String(error)
        })
        errorCount++
      }
    }
    // Log the batch operation
    await supabase
      .from('function_logs')
      .insert({
        file_id: null,
        step: 'batch_geocoding_complete',
        message: 'Batch geocoding operation completed',
        data: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount,
          results: results
        }
      })
    return new Response(
      JSON.stringify({
        success: true,
        message: 'Batch geocoding completed',
        summary: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount
        },
        results: results
      }),
      {
        status: 200,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  } catch (error) {
    console.error('Error in batch institute geocoder:', error)
    return new Response(
      JSON.stringify({
        error: 'Internal server error',
        details: error instanceof Error ? error.message : String(error)
      }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  }
})
// Geocode a free-text address via SearXNG's !osm bang and return the first hit.
// Resolves to { success: true, coordinates } or { success: false, error } —
// never rejects.
//
// Fixes (kept consistent with the fallback-variant copy of this helper):
// - Valid coordinates of 0 (equator / prime meridian) were rejected by the old
//   truthiness check, and non-numeric strings became NaN. Parse first, then
//   validate with Number.isFinite.
// - Caught errors are narrowed before reading .message.
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  try {
    // Format search query for OSM
    const osmQuery = `!osm ${searchQuery}`
    const searchUrl = `${searxngUrl}/search?q=${encodeURIComponent(osmQuery)}&format=json`
    const response = await fetch(searchUrl, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
        'User-Agent': 'ClassroomCopilot-BatchGeocoder/1.0'
      }
    })
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const data = await response.json()
    // Check if we have results - the number_of_results field might be unreliable
    // so we check the results array directly
    if (!data.results || data.results.length === 0) {
      return {
        success: false,
        error: 'No results returned from SearXNG'
      }
    }
    const result = data.results[0]
    // Parse before validating so legitimate 0 coordinates pass and
    // missing/empty/garbage values (NaN after parsing) are rejected.
    const latitude = Number.parseFloat(result.latitude)
    const longitude = Number.parseFloat(result.longitude)
    if (!Number.isFinite(latitude) || !Number.isFinite(longitude)) {
      return {
        success: false,
        error: 'Missing latitude or longitude in SearXNG response'
      }
    }
    return {
      success: true,
      coordinates: {
        latitude,
        longitude,
        boundingbox: result.boundingbox || [],
        geojson: result.geojson,
        osm: result.osm
      }
    }
  } catch (error) {
    console.error('Geocoding error:', error)
    return {
      success: false,
      error: error instanceof Error ? error.message : String(error)
    }
  }
}

View File

@ -0,0 +1,315 @@
// Example usage of Institute Geocoder functions
// This file demonstrates how to integrate the geocoding functions in your frontend
import { useEffect, useState } from 'react'

import { createClient } from '@supabase/supabase-js'
// Initialize Supabase client
// The non-null assertions (!) assume both NEXT_PUBLIC_* vars are set at build time.
const supabaseUrl = process.env.NEXT_PUBLIC_SUPABASE_URL!
const supabaseAnonKey = process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!
const supabase = createClient(supabaseUrl, supabaseAnonKey)
// Types for institute data
// Shape of an institutes row as selected by the helpers below.
interface Institute {
  id: string
  name: string
  address: {
    street?: string
    town?: string
    county?: string
    postcode?: string
    country?: string
  }
  // Populated by the institute-geocoder Edge Function; absent until geocoded.
  geo_coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    search_query: string   // query string that was recorded with the geocode
    geocoded_at: string    // ISO timestamp of when geocoding ran
  }
}
// Response payload returned by the single-institute geocoder function.
interface GeocodingResult {
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
  }
  error?: string
}
// 1. Geocode a single institute
// Invoke the institute-geocoder Edge Function for a single institute.
// Resolves to the function's GeocodingResult; invocation failures are logged
// and surfaced as a failure result rather than thrown.
export async function geocodeInstitute(instituteId: string): Promise<GeocodingResult> {
  try {
    const response = await supabase.functions.invoke('institute-geocoder', {
      body: { institute_id: instituteId }
    })
    if (response.error) throw new Error(response.error.message)
    return response.data
  } catch (caught) {
    console.error('Geocoding failed:', caught)
    const detail = caught instanceof Error ? caught.message : 'Unknown error'
    return { success: false, message: 'Geocoding failed', error: detail }
  }
}
// 2. Batch geocode multiple institutes
// Invoke the batch geocoding Edge Function.
// limit: max institutes to process; forceRefresh: re-geocode even when
// coordinates already exist. Errors are logged and re-thrown to the caller.
export async function batchGeocodeInstitutes(
  limit: number = 10,
  forceRefresh: boolean = false
): Promise<any> {
  try {
    const payload = { limit, force_refresh: forceRefresh }
    const response = await supabase.functions.invoke('institute-geocoder/batch', {
      body: payload
    })
    if (response.error) {
      throw new Error(response.error.message)
    }
    return response.data
  } catch (caught) {
    console.error('Batch geocoding failed:', caught)
    throw caught
  }
}
// 3. Get institutes that need geocoding
// Fetch imported institutes that still lack geo_coordinates (null or empty
// object). Returns an empty array on any query failure.
export async function getInstitutesNeedingGeocoding(): Promise<Institute[]> {
  try {
    const pending = supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .or('geo_coordinates.is.null,geo_coordinates.eq.{}')
      .not('import_id', 'is', null)
    const { data, error } = await pending
    if (error) throw new Error(error.message)
    return data ?? []
  } catch (caught) {
    console.error('Failed to fetch institutes:', caught)
    return []
  }
}
// 4. Display institute on a map (example with Leaflet)
// Placeholder map hook: logs where the institute would be rendered.
// mapElement is accepted for API completeness; actual rendering (Leaflet,
// Mapbox, Google Maps, ...) is left to the integrator.
export function displayInstituteOnMap(
  institute: Institute,
  mapElement: HTMLElement
): void {
  const coords = institute.geo_coordinates
  if (!coords) {
    console.warn('Institute has no coordinates:', institute.name)
    return
  }
  console.log(`Displaying ${institute.name} at ${coords.latitude}, ${coords.longitude}`)
  // Example map implementation:
  // const map = L.map(mapElement).setView([latitude, longitude], 13)
  // L.marker([latitude, longitude]).addTo(map).bindPopup(institute.name)
}
// 5. React component example
// React component demonstrating single and batch geocoding flows.
// NOTE(review): relies on useState/useEffect, which this file never imports —
// add `import { useEffect, useState } from 'react'` at the top.
export function InstituteGeocoder() {
  const [institutes, setInstitutes] = useState<Institute[]>([])
  const [loading, setLoading] = useState(false)
  // Progress is only ever set to 0 or 100 here; no per-item updates are wired up.
  const [geocodingProgress, setGeocodingProgress] = useState(0)
  // Load institutes that need geocoding
  useEffect(() => {
    loadInstitutes()
  }, [])
  async function loadInstitutes() {
    const data = await getInstitutesNeedingGeocoding()
    setInstitutes(data)
  }
  // Geocode all institutes
  async function geocodeAllInstitutes() {
    setLoading(true)
    setGeocodingProgress(0)
    try {
      const result = await batchGeocodeInstitutes(institutes.length, false)
      if (result.success) {
        setGeocodingProgress(100)
        // Reload institutes to show updated coordinates
        await loadInstitutes()
      }
    } catch (error) {
      console.error('Batch geocoding failed:', error)
    } finally {
      setLoading(false)
    }
  }
  // Geocode single institute
  async function geocodeSingleInstitute(instituteId: string) {
    try {
      const result = await geocodeInstitute(instituteId)
      if (result.success) {
        // Reload institutes to show updated coordinates
        await loadInstitutes()
      }
    } catch (error) {
      console.error('Single geocoding failed:', error)
    }
  }
  return (
    <div className="institute-geocoder">
      <h2>Institute Geocoding</h2>
      <div className="controls">
        <button
          onClick={geocodeAllInstitutes}
          disabled={loading || institutes.length === 0}
        >
          {loading ? 'Geocoding...' : `Geocode All (${institutes.length})`}
        </button>
        {loading && (
          <div className="progress">
            <div
              className="progress-bar"
              style={{ width: `${geocodingProgress}%` }}
            />
          </div>
        )}
      </div>
      <div className="institutes-list">
        {institutes.map(institute => (
          <div key={institute.id} className="institute-item">
            <h3>{institute.name}</h3>
            <p>
              {institute.address.street && `${institute.address.street}, `}
              {institute.address.town && `${institute.address.town}, `}
              {institute.address.county && `${institute.address.county}, `}
              {institute.address.postcode}
            </p>
            {institute.geo_coordinates ? (
              <div className="coordinates">
                <span>📍 {institute.geo_coordinates.latitude}, {institute.geo_coordinates.longitude}</span>
                <span>Geocoded: {new Date(institute.geo_coordinates.geocoded_at).toLocaleDateString()}</span>
              </div>
            ) : (
              <button
                onClick={() => geocodeSingleInstitute(institute.id)}
                disabled={loading}
              >
                Geocode
              </button>
            )}
          </div>
        ))}
      </div>
    </div>
  )
}
// 6. Utility functions for working with coordinates
// Geometry helpers for latitude/longitude pairs.
export class CoordinateUtils {
  // Great-circle distance in kilometres between two points (Haversine formula).
  static calculateDistance(
    lat1: number,
    lon1: number,
    lat2: number,
    lon2: number
  ): number {
    const earthRadiusKm = 6371
    const dLat = this.toRadians(lat2 - lat1)
    const dLon = this.toRadians(lon2 - lon1)
    const sinHalfLat = Math.sin(dLat / 2)
    const sinHalfLon = Math.sin(dLon / 2)
    const a =
      sinHalfLat * sinHalfLat +
      Math.cos(this.toRadians(lat1)) * Math.cos(this.toRadians(lat2)) *
      sinHalfLon * sinHalfLon
    const angularDistance = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a))
    return earthRadiusKm * angularDistance
  }
  // Degrees -> radians.
  private static toRadians(degrees: number): number {
    return degrees * (Math.PI / 180)
  }
  // True when (lat, lon) lies inside the inclusive bounding box.
  static isWithinBounds(
    lat: number,
    lon: number,
    bounds: [number, number, number, number] // [minLat, maxLat, minLon, maxLon]
  ): boolean {
    const [minLat, maxLat, minLon, maxLon] = bounds
    return lat >= minLat && lat <= maxLat && lon >= minLon && lon <= maxLon
  }
  // Human-readable "DD.DDDDDD°N, DD.DDDDDD°E" rendering (6 decimal places).
  static formatCoordinates(lat: number, lon: number): string {
    const latDir = lat >= 0 ? 'N' : 'S'
    const lonDir = lon >= 0 ? 'E' : 'W'
    return `${Math.abs(lat).toFixed(6)}°${latDir}, ${Math.abs(lon).toFixed(6)}°${lonDir}`
  }
}
// 7. Example of using coordinates in Neo4j queries
// Cypher templates for mirroring geocoded institutes into Neo4j.
// Parameters ($institute_id, $latitude, ...) are supplied at execution time.
export const neo4jQueries = {
  // Create institute node with location
  createInstituteWithLocation: `
    CREATE (i:Institute {
      id: $institute_id,
      name: $name,
      location: point({latitude: $latitude, longitude: $longitude})
    })
    RETURN i
  `,
  // Find institutes within radius
  // distance() operates in metres for geographic points.
  findInstitutesWithinRadius: `
    MATCH (i:Institute)
    WHERE distance(i.location, point({latitude: $centerLat, longitude: $centerLon})) < $radiusMeters
    RETURN i, distance(i.location, point({latitude: $centerLat, longitude: $centerLon})) as distance
    ORDER BY distance
  `,
  // Find institutes in bounding box
  findInstitutesInBounds: `
    MATCH (i:Institute)
    WHERE i.location.latitude >= $minLat
    AND i.location.latitude <= $maxLat
    AND i.location.longitude >= $minLon
    AND i.location.longitude <= $maxLon
    RETURN i
  `
}
// Aggregate default export: lets consumers pull the whole geocoding toolkit
// in one import while the individual named exports remain available.
export default {
  geocodeInstitute,
  batchGeocodeInstitutes,
  getInstitutesNeedingGeocoding,
  displayInstituteOnMap,
  InstituteGeocoder,
  CoordinateUtils,
  neo4jQueries
}

View File

@ -0,0 +1,325 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response (including errors) so browser
// clients on any origin can invoke this edge function.
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
// Request payload: the institute to geocode, plus optional address parts.
// Either a free-form `address` or the structured street/town/... fields may
// be supplied; when absent, the institute's stored address is used instead.
interface GeocodingRequest {
  institute_id: string
  address?: string
  street?: string
  town?: string
  county?: string
  postcode?: string
  country?: string
}
// Shape of the SearXNG JSON search response we consume. Note latitude and
// longitude arrive as strings and must be parsed.
interface SearXNGResponse {
  query: string
  number_of_results: number
  results: Array<{
    title: string
    longitude: string
    latitude: string
    boundingbox: string[]
    geojson?: any
    osm?: any
  }>
}
// Normalized outcome of a geocoding attempt; `coordinates` is present only
// when `success` is true.
interface GeocodingResult {
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}
// Edge function entry point: geocodes one institute's address via SearXNG
// and persists the coordinates back onto the `institutes` row.
serve(async (req: Request) => {
  // Handle CORS preflight requests
  if (req.method === 'OPTIONS') {
    return new Response('ok', { headers: corsHeaders })
  }
  try {
    // Service-role credentials are required: this function updates rows
    // directly, bypassing RLS.
    const supabaseUrl = Deno.env.get('SUPABASE_URL')
    const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
    const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
    if (!supabaseUrl || !supabaseServiceKey) {
      throw new Error('Missing required environment variables')
    }
    // Create Supabase client
    const supabase = createClient(supabaseUrl, supabaseServiceKey)
    // Parse request body
    const body: GeocodingRequest = await req.json()
    if (!body.institute_id) {
      return new Response(
        JSON.stringify({ error: 'institute_id is required' }),
        {
          status: 400,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    // Get institute data from database
    const { data: institute, error: fetchError } = await supabase
      .from('institutes')
      .select('*')
      .eq('id', body.institute_id)
      .single()
    if (fetchError || !institute) {
      return new Response(
        JSON.stringify({ error: 'Institute not found' }),
        {
          status: 404,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    // Build a human-readable search query from the caller-supplied address
    // components (free-form `address` wins over the structured parts).
    let searchQuery = ''
    if (body.address) {
      searchQuery = body.address
    } else {
      const addressParts = [
        body.street,
        body.town,
        body.county,
        body.postcode,
        body.country
      ].filter(Boolean)
      searchQuery = addressParts.join(', ')
    }
    // Fall back to the institute's stored address when the caller sent none.
    if (!searchQuery && institute.address) {
      const address = institute.address as any
      const addressParts = [
        address.street,
        address.town,
        address.county,
        address.postcode,
        address.country
      ].filter(Boolean)
      searchQuery = addressParts.join(', ')
    }
    if (!searchQuery) {
      return new Response(
        JSON.stringify({ error: 'No address information available for geocoding' }),
        {
          status: 400,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    // Query SearXNG for geocoding.
    // NOTE(review): the fallback helper geocodes the institute's STORED
    // structured address, not `searchQuery` built above — `searchQuery` is
    // only persisted for traceability. Confirm this is intended when the
    // caller supplies its own address parts.
    // `?? {}` guards rows whose `address` column is NULL, which would
    // otherwise crash the property reads inside the fallback helper.
    const geocodingResult = await geocodeAddressWithFallback(institute.address ?? {}, searxngUrl)
    if (!geocodingResult.success) {
      return new Response(
        JSON.stringify({
          error: 'Geocoding failed',
          details: geocodingResult.error
        }),
        {
          status: 500,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    // Persist coordinates plus provenance (query used, timestamp) on the row.
    const { error: updateError } = await supabase
      .from('institutes')
      .update({
        geo_coordinates: {
          latitude: geocodingResult.coordinates!.latitude,
          longitude: geocodingResult.coordinates!.longitude,
          boundingbox: geocodingResult.coordinates!.boundingbox,
          geojson: geocodingResult.coordinates!.geojson,
          osm: geocodingResult.coordinates!.osm,
          search_query: searchQuery,
          geocoded_at: new Date().toISOString()
        }
      })
      .eq('id', body.institute_id)
    if (updateError) {
      throw new Error(`Failed to update institute: ${updateError.message}`)
    }
    // Log the geocoding operation (best-effort audit trail).
    await supabase
      .from('function_logs')
      .insert({
        file_id: null,
        step: 'geocoding',
        message: 'Successfully geocoded institute address',
        data: {
          institute_id: body.institute_id,
          search_query: searchQuery,
          coordinates: geocodingResult.coordinates
        }
      })
    return new Response(
      JSON.stringify({
        success: true,
        message: 'Institute geocoded successfully',
        institute_id: body.institute_id,
        coordinates: geocodingResult.coordinates
      }),
      {
        status: 200,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  } catch (error) {
    console.error('Error in institute geocoder:', error)
    return new Response(
      JSON.stringify({
        error: 'Internal server error',
        // Catch values are `unknown` in modern TS/Deno; normalize safely.
        details: error instanceof Error ? error.message : String(error)
      }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  }
})
/**
 * Geocode a free-form address string via a SearXNG instance.
 *
 * Prefixes the query with `!osm` so SearXNG routes it to the OpenStreetMap
 * engine, whose results carry latitude/longitude (as strings).
 * Never throws: failures are reported through the GeocodingResult envelope.
 */
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<GeocodingResult> {
  try {
    console.log(`Geocoding address: ${searchQuery}`)
    // Build the SearXNG query
    const query = `!osm ${searchQuery}`
    const url = `${searxngUrl}/search?q=${encodeURIComponent(query)}&format=json`
    console.log(`SearXNG URL: ${url}`)
    const response = await fetch(url)
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const data: SearXNGResponse = await response.json()
    console.log(`SearXNG response: ${JSON.stringify(data, null, 2)}`)
    // Check if we have results
    if (!data.results || data.results.length === 0) {
      return {
        success: false,
        message: 'No results returned from SearXNG',
        error: 'No results returned from SearXNG'
      }
    }
    // Take the first (best-ranked) result.
    const bestResult = data.results[0]
    if (!bestResult.latitude || !bestResult.longitude) {
      return {
        success: false,
        message: 'Result missing coordinates',
        error: 'Result missing coordinates'
      }
    }
    const latitude = parseFloat(bestResult.latitude)
    const longitude = parseFloat(bestResult.longitude)
    // parseFloat yields NaN for non-numeric strings; without this check a
    // malformed result would previously be reported as a success and NaN
    // coordinates written to the database.
    if (!Number.isFinite(latitude) || !Number.isFinite(longitude)) {
      return {
        success: false,
        message: 'Result has non-numeric coordinates',
        error: 'Result has non-numeric coordinates'
      }
    }
    return {
      success: true,
      message: 'Geocoding successful',
      coordinates: {
        latitude,
        longitude,
        boundingbox: bestResult.boundingbox || [],
        geojson: bestResult.geojson || null,
        osm: bestResult.osm || null
      }
    }
  } catch (error) {
    console.error('Error in geocodeAddress:', error)
    return {
      success: false,
      message: 'Geocoding failed',
      // Catch values are `unknown`; avoid reading .message off a non-Error.
      error: error instanceof Error ? error.message : String(error)
    }
  }
}
/**
 * Geocode a structured address with progressively coarser queries:
 * full address → town/county/postcode → postcode only → town/postcode.
 * Returns the first successful result, or a failure envelope when every
 * strategy fails. Never throws.
 *
 * @param address object with optional street/town/county/postcode fields
 *                (may be null/undefined for institutes without an address)
 */
async function geocodeAddressWithFallback(address: any, searxngUrl: string): Promise<GeocodingResult> {
  // Guard: a NULL/undefined address previously crashed on `address.street`.
  if (!address) {
    return {
      success: false,
      message: 'All geocoding strategies failed',
      error: 'No address information available'
    }
  }
  // Strategy 1: Try full address (street + town + county + postcode)
  if (address.street && address.town && address.county && address.postcode) {
    const fullQuery = `${address.street}, ${address.town}, ${address.county}, ${address.postcode}`
    console.log(`Trying full address: ${fullQuery}`)
    const result = await geocodeAddress(fullQuery, searxngUrl)
    if (result.success) {
      console.log('Full address geocoding successful')
      return result
    }
  }
  // Strategy 2: Try town + county + postcode
  if (address.town && address.county && address.postcode) {
    const mediumQuery = `${address.town}, ${address.county}, ${address.postcode}`
    console.log(`Trying medium address: ${mediumQuery}`)
    const result = await geocodeAddress(mediumQuery, searxngUrl)
    if (result.success) {
      console.log('Medium address geocoding successful')
      return result
    }
  }
  // Strategy 3: Try just postcode
  // NOTE(review): postcode-only runs BEFORE town+postcode; confirm this
  // ordering is intentional before reordering.
  if (address.postcode) {
    console.log(`Trying postcode only: ${address.postcode}`)
    const result = await geocodeAddress(address.postcode, searxngUrl)
    if (result.success) {
      console.log('Postcode geocoding successful')
      return result
    }
  }
  // Strategy 4: Try town + postcode
  if (address.town && address.postcode) {
    const simpleQuery = `${address.town}, ${address.postcode}`
    console.log(`Trying simple address: ${simpleQuery}`)
    const result = await geocodeAddress(simpleQuery, searxngUrl)
    if (result.success) {
      console.log('Simple address geocoding successful')
      return result
    }
  }
  // All strategies failed
  return {
    success: false,
    message: 'All geocoding strategies failed',
    error: 'No coordinates found with any address combination'
  }
}

View File

@ -0,0 +1,142 @@
// Test script for institute geocoder functions
// This can be run in the browser console or as a standalone test
// A single geocoding scenario: the address sent to SearXNG and, when known,
// the coordinates the first result is expected to be near.
interface TestCase {
  name: string
  address: string
  expected_coords?: {
    latitude: number
    longitude: number
  }
}
// Well-known London landmarks with stable, publicly documented coordinates,
// used as ground truth for the accuracy check in testGeocoding().
const testCases: TestCase[] = [
  {
    name: "10 Downing Street, London",
    address: "10 Downing Street, London",
    expected_coords: {
      latitude: 51.5034878,
      longitude: -0.1276965
    }
  },
  {
    name: "Buckingham Palace, London",
    address: "Buckingham Palace, London",
    expected_coords: {
      latitude: 51.501364,
      longitude: -0.124432
    }
  },
  {
    name: "Big Ben, London",
    address: "Big Ben, London",
    expected_coords: {
      latitude: 51.499479,
      longitude: -0.124809
    }
  }
]
async function testGeocoding() {
console.log("🧪 Starting Institute Geocoder Tests...")
for (const testCase of testCases) {
console.log(`\n📍 Testing: ${testCase.name}`)
try {
// Test the SearXNG service directly
const searchQuery = `!osm ${testCase.address}`
const searchUrl = `https://search.kevlarai.com/search?q=${encodeURIComponent(searchQuery)}&format=json`
console.log(`🔍 Searching: ${searchUrl}`)
const response = await fetch(searchUrl)
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
}
const data = await response.json()
console.log(`📊 Results: ${data.number_of_results} found`)
if (data.results && data.results.length > 0) {
const result = data.results[0]
const coords = {
latitude: parseFloat(result.latitude),
longitude: parseFloat(result.longitude)
}
console.log(`✅ Coordinates: ${coords.latitude}, ${coords.longitude}`)
if (testCase.expected_coords) {
const latDiff = Math.abs(coords.latitude - testCase.expected_coords.latitude)
const lonDiff = Math.abs(coords.longitude - testCase.expected_coords.longitude)
if (latDiff < 0.01 && lonDiff < 0.01) {
console.log(`🎯 Accuracy: High (within 0.01 degrees)`)
} else if (latDiff < 0.1 && lonDiff < 0.1) {
console.log(`🎯 Accuracy: Medium (within 0.1 degrees)`)
} else {
console.log(`⚠️ Accuracy: Low (difference > 0.1 degrees)`)
}
}
if (result.boundingbox) {
console.log(`🗺️ Bounding Box: ${result.boundingbox.join(', ')}`)
}
if (result.geojson) {
console.log(`🗺️ GeoJSON: ${result.geojson.type} with ${result.geojson.coordinates?.[0]?.length || 0} points`)
}
} else {
console.log(`❌ No results found`)
}
} catch (error) {
console.error(`❌ Test failed: ${error.message}`)
}
}
console.log("\n🏁 Testing completed!")
}
// Test address parsing function
/**
 * Demonstrates how structured address fields are flattened into the
 * comma-joined search query used for geocoding (missing parts dropped).
 * Console-only; returns nothing.
 */
function testAddressParsing() {
  console.log("\n🔧 Testing Address Parsing...")
  const testAddresses = [
    {
      street: "10 Downing Street",
      town: "London",
      county: "Greater London",
      postcode: "SW1A 2AA",
      country: "United Kingdom"
    },
    {
      street: "Buckingham Palace",
      town: "London",
      county: "Greater London",
      postcode: "SW1A 1AA",
      country: "United Kingdom"
    }
  ]
  for (const entry of testAddresses) {
    // Same join logic as the edge function: drop empty parts, comma-join.
    const searchQuery = [entry.street, entry.town, entry.county, entry.postcode, entry.country]
      .filter(Boolean)
      .join(', ')
    console.log(`📍 Address: ${searchQuery}`)
  }
}
// Run tests if this script is executed directly
// Detect the host environment: in a browser the helpers are attached to
// `window` so they can be invoked from the devtools console; elsewhere
// (Node/Deno) they are only available via the module exports below.
if (typeof window !== 'undefined') {
  // Browser environment
  window.testGeocoding = testGeocoding
  window.testAddressParsing = testAddressParsing
  console.log("🧪 Institute Geocoder tests loaded. Run testGeocoding() or testAddressParsing() to test.")
} else {
  // Node.js environment
  console.log("🧪 Institute Geocoder tests loaded.")
}
export { testGeocoding, testAddressParsing }

View File

@ -0,0 +1,94 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
console.log('main function started')
// Shared HS256 secret used to validate incoming Supabase JWTs.
const JWT_SECRET = Deno.env.get('JWT_SECRET')
// JWT verification is opt-in via env; only the literal string 'true' enables it.
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
/**
 * Extract the bearer token from a request's Authorization header.
 * Throws when the header is absent or not in "Bearer {token}" form.
 */
function getAuthToken(req: Request) {
  const authHeader = req.headers.get('authorization')
  if (!authHeader) {
    throw new Error('Missing authorization header')
  }
  const [scheme, credentials] = authHeader.split(' ')
  if (scheme !== 'Bearer') {
    throw new Error(`Auth header is not 'Bearer {token}'`)
  }
  return credentials
}
/**
 * Verify a JWT's signature against the shared HS256 secret.
 * Returns true on success; logs and returns false on any verification error.
 */
async function verifyJWT(jwt: string): Promise<boolean> {
  // jose expects the raw secret bytes, not the string itself.
  const secretKey = new TextEncoder().encode(JWT_SECRET)
  try {
    await jose.jwtVerify(jwt, secretKey)
    return true
  } catch (err) {
    console.error(err)
    return false
  }
}
// Edge-runtime router: optionally validates the caller's JWT, then spawns
// (or reuses) a per-function user worker keyed off the first path segment
// and proxies the request to it.
serve(async (req: Request) => {
  // CORS preflights are never authenticated; everything else is gated on
  // VERIFY_JWT.
  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
    try {
      const token = getAuthToken(req)
      const isValidJWT = await verifyJWT(token)
      if (!isValidJWT) {
        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
          status: 401,
          headers: { 'Content-Type': 'application/json' },
        })
      }
    } catch (e) {
      // getAuthToken throws on missing/malformed Authorization headers.
      console.error(e)
      return new Response(JSON.stringify({ msg: e.toString() }), {
        status: 401,
        headers: { 'Content-Type': 'application/json' },
      })
    }
  }
  // Route on the first path segment: /<function-name>/...
  const url = new URL(req.url)
  const { pathname } = url
  const path_parts = pathname.split('/')
  const service_name = path_parts[1]
  if (!service_name || service_name === '') {
    const error = { msg: 'missing function name in request' }
    return new Response(JSON.stringify(error), {
      status: 400,
      headers: { 'Content-Type': 'application/json' },
    })
  }
  // Each function lives in its own directory under /home/deno/functions.
  const servicePath = `/home/deno/functions/${service_name}`
  console.error(`serving the request with ${servicePath}`)
  // Per-worker resource limits; the full host environment is forwarded to
  // the worker as key/value pairs.
  const memoryLimitMb = 150
  const workerTimeoutMs = 1 * 60 * 1000
  const noModuleCache = false
  const importMapPath = null
  const envVarsObj = Deno.env.toObject()
  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
  try {
    // EdgeRuntime is a global provided by the Supabase edge runtime host.
    const worker = await EdgeRuntime.userWorkers.create({
      servicePath,
      memoryLimitMb,
      workerTimeoutMs,
      noModuleCache,
      importMapPath,
      envVars,
    })
    return await worker.fetch(req)
  } catch (e) {
    // Worker creation fails e.g. when the named function does not exist.
    const error = { msg: e.toString() }
    return new Response(JSON.stringify(error), {
      status: 500,
      headers: { 'Content-Type': 'application/json' },
    })
  }
})

View File

@ -0,0 +1,3 @@
volumes/db/data
volumes/storage
volumes/logs

51
volumes/mcp/Dockerfile Normal file
View File

@ -0,0 +1,51 @@
# syntax=docker/dockerfile:1
# Dockerfile for selfhosted-supabase-mcp HTTP mode
# Multi-stage build using Bun runtime for self-hosted Supabase
FROM oven/bun:1.1-alpine AS builder
WORKDIR /app
# Copy package manifests first so the dependency layer is cached
# independently of source changes.
COPY selfhosted-supabase-mcp/package.json selfhosted-supabase-mcp/bun.lock* ./
# Prefer a reproducible install from the lockfile; fall back to a plain
# install when the submodule ships no lockfile.
# NOTE(review): the fallback sacrifices reproducibility — consider committing
# a lockfile and dropping the `|| bun install` branch.
RUN bun install --frozen-lockfile || bun install
# Copy source code
COPY selfhosted-supabase-mcp/src ./src
COPY selfhosted-supabase-mcp/tsconfig.json ./
# Build the application
RUN bun build src/index.ts --outdir dist --target bun
# Production stage
FROM oven/bun:1.1-alpine AS runner
WORKDIR /app
# Create non-root user for security (stable numeric UID/GID for runtimes
# that enforce runAsNonRoot)
RUN addgroup --system --gid 1001 mcp && \
    adduser --system --uid 1001 --ingroup mcp mcp
# Copy built artifacts with ownership set at copy time; a follow-up
# `RUN chown -R` would duplicate every file into an extra layer.
COPY --from=builder --chown=mcp:mcp /app/dist ./dist
COPY --from=builder --chown=mcp:mcp /app/node_modules ./node_modules
COPY --from=builder --chown=mcp:mcp /app/package.json ./
USER mcp
# Default environment variables
ENV NODE_ENV=production
# Health check (busybox wget ships with the alpine base)
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:3100/health || exit 1
# Expose HTTP port (documentation only; 3100 is unprivileged, so the
# non-root user can bind it)
EXPOSE 3100
# Start the MCP server in HTTP mode
CMD ["bun", "run", "dist/index.js"]