This commit is contained in:
Kevin Carter 2025-11-14 14:46:49 +00:00
parent b9cc670ef4
commit 1941a2626d
38 changed files with 2843 additions and 1345 deletions

139
.archive/.env Normal file
View File

@ -0,0 +1,139 @@
## App Information
APP_NAME=ClassroomCopilot
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=localhost
APP_PROTOCOL=http
# Super Admin user
# NOTE(review): a real password is committed here — rotate it and load secrets
# from an untracked file or secret manager instead of version control.
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
## Ports
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
# NOTE(review): JWT_SECRET, SECRET_KEY_BASE and the signed API keys below are
# live-looking secrets committed to the repository — rotate them and keep them
# out of git history.
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
# VAULT_ENC_KEY is still the documentation placeholder — must be replaced with
# a real key (32 chars minimum) before Vault features are used.
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=http://localhost,http://127.0.0.1
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
# The placeholder SMTP values that used to sit here were dead configuration:
# each key was redefined a few lines below, and with the usual
# last-definition-wins semantics of dotenv loaders only the later values ever
# took effect. Kept commented out for reference.
# SMTP_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
# SMTP_USER=fake_mail_user
# SMTP_PASS=fake_mail_password
# SMTP_SENDER_NAME=fake_sender
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
# NOTE(review): real SMTP password committed to the repo — rotate it.
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase OAuth Providers
# Azure Auth
# NOTE(review): AZURE_SECRET below is a committed OAuth client secret — rotate
# it in the Azure portal and remove it from version control.
AZURE_ENABLED=false
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## Supabase Analytics Configuration (Google Cloud)
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

View File

@ -394,12 +394,10 @@ services:
- ./db/init-scripts/51-webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/51-webhooks.sql - ./db/init-scripts/51-webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/51-webhooks.sql
- ./db/init-scripts/52-roles.sql:/docker-entrypoint-initdb.d/init-scripts/52-roles.sql - ./db/init-scripts/52-roles.sql:/docker-entrypoint-initdb.d/init-scripts/52-roles.sql
- ./db/init-scripts/52-jwt.sql:/docker-entrypoint-initdb.d/init-scripts/52-jwt.sql - ./db/init-scripts/52-jwt.sql:/docker-entrypoint-initdb.d/init-scripts/52-jwt.sql
- ./db/migrations/core/60-create-databases.sql:/docker-entrypoint-initdb.d/migrations/60-create-databases.sql
- ./db/migrations/core/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql - ./db/migrations/core/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql
- ./db/migrations/core/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql - ./db/migrations/core/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql
- ./db/migrations/core/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql - ./db/migrations/core/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql
- ./db/migrations/core/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql - ./db/migrations/core/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql
- ./db/migrations/core/65-keycloak-setup.sql:/docker-entrypoint-initdb.d/migrations/65-keycloak-setup.sql
- supabase-db-data:/var/lib/postgresql/data - supabase-db-data:/var/lib/postgresql/data
- supabase-db-config:/etc/postgresql-custom - supabase-db-config:/etc/postgresql-custom
networks: networks:

150
.archive/package-lock.json generated Normal file
View File

@ -0,0 +1,150 @@
{
"name": "supabase",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"jsonwebtoken": "^9.0.2"
}
},
"node_modules/buffer-equal-constant-time": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
"license": "BSD-3-Clause"
},
"node_modules/ecdsa-sig-formatter": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
"integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==",
"license": "Apache-2.0",
"dependencies": {
"safe-buffer": "^5.0.1"
}
},
"node_modules/jsonwebtoken": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz",
"integrity": "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==",
"license": "MIT",
"dependencies": {
"jws": "^3.2.2",
"lodash.includes": "^4.3.0",
"lodash.isboolean": "^3.0.3",
"lodash.isinteger": "^4.0.4",
"lodash.isnumber": "^3.0.3",
"lodash.isplainobject": "^4.0.6",
"lodash.isstring": "^4.0.1",
"lodash.once": "^4.0.0",
"ms": "^2.1.1",
"semver": "^7.5.4"
},
"engines": {
"node": ">=12",
"npm": ">=6"
}
},
"node_modules/jwa": {
"version": "1.4.2",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.2.tgz",
"integrity": "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==",
"license": "MIT",
"dependencies": {
"buffer-equal-constant-time": "^1.0.1",
"ecdsa-sig-formatter": "1.0.11",
"safe-buffer": "^5.0.1"
}
},
"node_modules/jws": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz",
"integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==",
"license": "MIT",
"dependencies": {
"jwa": "^1.4.1",
"safe-buffer": "^5.0.1"
}
},
"node_modules/lodash.includes": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz",
"integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==",
"license": "MIT"
},
"node_modules/lodash.isboolean": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz",
"integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==",
"license": "MIT"
},
"node_modules/lodash.isinteger": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz",
"integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==",
"license": "MIT"
},
"node_modules/lodash.isnumber": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz",
"integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==",
"license": "MIT"
},
"node_modules/lodash.isplainobject": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
"integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==",
"license": "MIT"
},
"node_modules/lodash.isstring": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
"integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==",
"license": "MIT"
},
"node_modules/lodash.once": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz",
"integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==",
"license": "MIT"
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/semver": {
"version": "7.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
"integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
}
}
}

5
.archive/package.json Normal file
View File

@ -0,0 +1,5 @@
{
"dependencies": {
"jsonwebtoken": "^9.0.2"
}
}

View File

@ -1,491 +0,0 @@
--[ Database Schema Version ]--
-- Version: 1.0.0
-- Last Updated: 2024-02-24
-- Description: Initial schema setup for ClassConcepts
-- Dependencies: auth.users (Supabase Auth)
--[ Validation ]--
-- Preflight: abort the whole script early with a clear error when a required
-- dependency is missing, instead of failing half-way through the DDL below.
do $$
begin
-- Verify uuid-ossp can be installed.
-- FIX: the original checked pg_extension (i.e. "already installed"), but this
-- script only installs the extension further down — on a fresh cluster that
-- check always failed. What is actually required here is that the extension
-- is *available* for the later CREATE EXTENSION to succeed.
if not exists (select 1 from pg_available_extensions where name = 'uuid-ossp') then
raise exception 'Required extension uuid-ossp is not installed';
end if;
-- Verify auth schema exists (provided by Supabase Auth)
if not exists (select 1 from information_schema.schemata where schema_name = 'auth') then
raise exception 'Required auth schema is not available';
end if;
-- Verify storage schema exists (provided by Supabase Storage)
if not exists (select 1 from information_schema.schemata where schema_name = 'storage') then
raise exception 'Required storage schema is not available';
end if;
end $$;
--[ 1. Extensions ]--
create extension if not exists "uuid-ossp";
-- Create rpc schema if it doesn't exist (used for REST-exposed RPC wrappers)
create schema if not exists rpc;
grant usage on schema rpc to anon, authenticated;
-- exec_sql: run an arbitrary SQL string as the function owner.
-- SECURITY FIX: PostgreSQL grants EXECUTE on new functions to PUBLIC by
-- default, and this one is SECURITY DEFINER — leaving the default grant in
-- place would let ANY role execute arbitrary SQL with the owner's privileges.
-- Restrict it to the service role used for admin operations.
create or replace function exec_sql(query text)
returns void as $$
begin
execute query;
end;
$$ language plpgsql security definer;
revoke execute on function exec_sql(text) from public;
grant execute on function exec_sql(text) to service_role;
-- Trigger helper: stamp updated_at with the current UTC time on every write.
-- Attached BEFORE UPDATE by the triggers in section 11.
create or replace function public.handle_updated_at()
returns trigger
language plpgsql
security definer
as $$
begin
    new.updated_at := timezone('utc'::text, now());
    return new;
end;
$$;
--[ 5. Core Tables ]--
-- Base user profiles
-- One row per auth user: id mirrors auth.users.id and the row is removed when
-- the auth user is deleted (on delete cascade). Rows are created
-- automatically by the on_auth_user_created trigger defined later in this
-- script; updated_at is maintained by the handle_profiles_updated_at trigger.
create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade,
email text not null unique,
-- NOTE(review): the check constraint uses 'email_teacher'/'email_student'
-- while the column comment below says "teacher, or student" — confirm which
-- naming is canonical.
user_type text not null check (user_type in ('admin', 'email_teacher', 'email_student')),
username text not null unique,
display_name text,
metadata jsonb default '{}'::jsonb,
last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.profiles is 'User profiles linked to Supabase auth.users';
comment on column public.profiles.user_type is 'Type of user: admin, teacher, or student';
-- Institute import data
create table if not exists public.institute_imports (
id uuid primary key default uuid_generate_v4(),
urn text unique,
establishment_name text not null,
la_code text,
la_name text,
establishment_number text,
establishment_type text,
establishment_type_group text,
establishment_status text,
reason_establishment_opened text,
open_date date,
reason_establishment_closed text,
close_date date,
phase_of_education text,
statutory_low_age integer,
statutory_high_age integer,
boarders text,
nursery_provision text,
official_sixth_form text,
gender text,
religious_character text,
religious_ethos text,
diocese text,
admissions_policy text,
school_capacity integer,
special_classes text,
census_date date,
number_of_pupils integer,
number_of_boys integer,
number_of_girls integer,
percentage_fsm numeric(5,2),
trust_school_flag text,
trusts_name text,
school_sponsor_flag text,
school_sponsors_name text,
federation_flag text,
federations_name text,
ukprn text,
fehe_identifier text,
further_education_type text,
ofsted_last_inspection date,
last_changed_date date,
street text,
locality text,
address3 text,
town text,
county text,
postcode text,
school_website text,
telephone_num text,
head_title text,
head_first_name text,
head_last_name text,
head_preferred_job_title text,
gssla_code text,
parliamentary_constituency text,
urban_rural text,
rsc_region text,
country text,
uprn text,
sen_stat boolean,
sen_no_stat boolean,
sen_unit_on_roll integer,
sen_unit_capacity integer,
resourced_provision_on_roll integer,
resourced_provision_capacity integer,
metadata jsonb default '{}'::jsonb,
imported_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_imports is 'Raw institute data imported from external sources';
-- Active institutes
-- Canonical institute records, optionally linked back to the raw import row
-- they were created from. The neo4j_* columns track synchronisation state
-- with an external graph database.
create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(),
import_id uuid references public.institute_imports(id),
name text not null,
urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb,
website text,
metadata jsonb default '{}'::jsonb,
neo4j_unique_id text,
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
neo4j_private_sync_at timestamp with time zone,
-- NOTE(review): updated_at exists here but section 11 creates no
-- handle_updated_at trigger for this table — confirm whether that is
-- intentional.
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institutes is 'Active institutes in the system';
--[ 6. Relationship Tables ]--
-- Institute memberships
-- Links a profile to an institute with a role; at most one membership per
-- (profile, institute) pair. Rows disappear with either parent (cascade).
create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('admin', 'teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()),
unique(profile_id, institute_id)
);
comment on table public.institute_memberships is 'Manages user roles and relationships with institutes';
-- Membership requests
-- Join requests awaiting a decision; requested_role excludes 'admin' (unlike
-- memberships.role) — presumably deliberate, so admin roles can only be
-- granted directly.
create table if not exists public.institute_membership_requests (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade,
requested_role text check (requested_role in ('teacher', 'student')),
status text default 'pending' check (status in ('pending', 'approved', 'rejected')),
metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 7. Audit Tables ]--
-- System audit logs
-- profile_id is set null (not cascaded) when the actor's profile is deleted,
-- so the audit row itself survives.
create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete set null,
action_type text,
table_name text,
record_id uuid,
changes jsonb,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 8. Auth Functions ]--
-- Create a secure function to check admin status
-- True when the calling user's profile row has user_type = 'admin'.
-- SECURITY DEFINER so the lookup bypasses the caller's RLS visibility on
-- public.profiles; returns false when no matching profile exists.
create or replace function auth.is_admin()
returns boolean as $$
select coalesce(
(select true
from public.profiles
where id = auth.uid()
and user_type = 'admin'),
false
);
$$ language sql security definer;
-- Create a secure function to check super admin status
-- NOTE(review): this compares auth.users.role to 'supabase_admin'; confirm
-- Supabase actually stores that value there (rows are commonly
-- 'authenticated'), otherwise this function can never return true.
create or replace function auth.is_super_admin()
returns boolean as $$
select coalesce(
(select role = 'supabase_admin'
from auth.users
where id = auth.uid()),
false
);
$$ language sql security definer;
-- Create public wrappers for the auth functions
-- Thin pass-throughs so the checks are reachable via the public schema
-- (e.g. from PostgREST and RLS policies) without exposing auth directly.
create or replace function public.is_admin()
returns boolean as $$
select auth.is_admin();
$$ language sql security definer;
create or replace function public.is_super_admin()
returns boolean as $$
select auth.is_super_admin();
$$ language sql security definer;
-- Grant execute permissions
grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated;
grant execute on function auth.is_admin to authenticated;
grant execute on function auth.is_super_admin to authenticated;
-- Initial admin setup function
-- Promotes the profile matching admin_email to user_type 'admin', filling in
-- a default username/display_name only when they are null, and returns the
-- updated row as JSON. Raises when no profile with that email exists.
create or replace function public.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
declare
result json;
begin
-- Only allow this to run as service role or a superuser; this in-body check
-- is the real gate, on top of the execute grants below.
if not (
current_user = 'service_role'
or exists (
select 1 from pg_roles
where rolname = current_user
and rolsuper
)
) then
raise exception 'Must be run as service_role or superuser';
end if;
-- Update user_type and username for admin
update public.profiles
set user_type = 'admin',
username = coalesce(username, 'superadmin'),
display_name = coalesce(display_name, 'Super Admin')
where email = admin_email
returning json_build_object(
'id', id,
'email', email,
'user_type', user_type,
'username', username,
'display_name', display_name
) into result;
-- result stays null when the UPDATE matched no row
if result is null then
raise exception 'Admin user with email % not found', admin_email;
end if;
return result;
end;
$$;
-- Grant execute permissions
-- NOTE(review): granting to 'authenticated' is safe only because of the role
-- check inside the body — any logged-in user can call it but hits the
-- exception. The grant also assumes a 'supabase_admin' role exists; confirm,
-- or the grant statement itself will error.
revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role, supabase_admin;
-- Create RPC wrapper for REST API access
-- Exposes the same operation under the rpc schema for PostgREST clients.
create or replace function rpc.setup_initial_admin(admin_email text)
returns json
language plpgsql
security definer
as $$
begin
return public.setup_initial_admin(admin_email);
end;
$$;
-- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role, supabase_admin;
--[ 9. Utility Functions ]--
-- check_db_ready: lightweight readiness probe for startup orchestration.
-- Returns true only when the auth/storage/public schemas all exist,
-- auth.users is present, and RLS is enabled on public.profiles.
create or replace function check_db_ready()
returns boolean
language plpgsql
security definer
as $$
begin
-- All three essential schemas must exist.
-- FIX: the original used NOT EXISTS over "schema_name in (...)", which only
-- failed when NONE of the schemas existed — any single match made the check
-- pass. Count the distinct matches and require all three.
if (
select count(distinct schema_name)
from information_schema.schemata
where schema_name in ('auth', 'storage', 'public')
) < 3 then
return false;
end if;
-- Check if essential tables exist
if not exists (
select 1
from information_schema.tables
where table_schema = 'auth'
and table_name = 'users'
) then
return false;
end if;
-- Check if RLS is enabled on public.profiles
if not exists (
select 1
from pg_tables
where schemaname = 'public'
and tablename = 'profiles'
and rowsecurity = true
) then
return false;
end if;
return true;
end;
$$;
-- Grant execute permission (probe must be callable pre-login, hence anon)
grant execute on function check_db_ready to anon, authenticated, service_role;
-- Function to handle new user registration
-- Trigger body for on_auth_user_created (below): inserts the matching
-- public.profiles row for every new auth.users row. user_type / username /
-- display_name come from raw_user_meta_data when supplied, otherwise fall
-- back to 'email_student' and the local part of the email address.
-- search_path is pinned to public since this runs SECURITY DEFINER.
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer set search_path = public
as $$
declare
default_user_type text := 'email_student';
default_username text;
begin
-- Generate username from email (everything before the '@')
default_username := split_part(new.email, '@', 1);
-- NOTE(review): no ON CONFLICT clause — if a profile with this id, email or
-- username already exists, the insert raises and the auth signup aborts;
-- confirm that is the intended behavior (username collisions are plausible
-- since two emails can share a local part).
insert into public.profiles (
id,
email,
user_type,
username,
display_name
)
values (
new.id,
new.email,
coalesce(new.raw_user_meta_data->>'user_type', default_user_type),
coalesce(new.raw_user_meta_data->>'username', default_username),
coalesce(new.raw_user_meta_data->>'display_name', default_username)
);
return new;
end;
$$;
-- Trigger for new user creation (dropped first so the script is re-runnable)
drop trigger if exists on_auth_user_created on auth.users;
create trigger on_auth_user_created
after insert on auth.users
for each row execute procedure public.handle_new_user();
--[ 10. Security Setup ]--
-- Enable RLS on every application table; row visibility is then governed
-- solely by the policies below plus table/column grants.
alter table if exists public.profiles enable row level security;
alter table if exists public.institute_imports enable row level security;
alter table if exists public.institutes enable row level security;
alter table if exists public.institute_memberships enable row level security;
alter table if exists public.institute_membership_requests enable row level security;
alter table if exists public.audit_logs enable row level security;
-- First, ensure proper schema access
grant usage on schema public to anon, authenticated;
-- Drop the older coarse-grained policies, replaced by the set below
drop policy if exists "Users can read and update own profile" on public.profiles;
drop policy if exists "Users can update their profile during registration" on public.profiles;
-- Create updated policies
-- Each authenticated user may read and update only their own row.
create policy "Users can read own profile"
on public.profiles for select
to authenticated
using (auth.uid() = id);
create policy "Users can update own profile"
on public.profiles for update
to authenticated
using (auth.uid() = id)
with check (auth.uid() = id);
-- NOTE(review): combined with the anon column grant below (which includes
-- email), this policy lets unauthenticated clients read the email addresses
-- of every teacher/student profile — confirm that exposure is intended.
create policy "Public can read basic profile info"
on public.profiles for select
to anon, authenticated
using (
user_type in ('email_teacher', 'email_student')
);
create policy "Super admins have full access"
on public.profiles for all
using (auth.is_super_admin());
create policy "Admins can read all profiles"
on public.profiles for select
using (auth.is_admin() or auth.is_super_admin());
-- Grant permissions (policies above still gate which rows are visible)
grant select, update on public.profiles to authenticated;
grant select (id, email, user_type, display_name) on public.profiles to anon;
-- Storage bucket policies
alter table if exists storage.buckets enable row level security;
-- Allow privileged roles full access to buckets.
-- SECURITY FIX: the original policy also matched current_user =
-- 'authenticated'; since PostgREST executes every signed-in user under the
-- "authenticated" role, that gave ALL logged-in users unrestricted access to
-- every bucket, defeating the ownership policy below. Only the privileged
-- roles belong here.
create policy "Super admin has full access to buckets"
on storage.buckets for all
using (current_user = 'service_role' or current_user = 'supabase_admin');
-- Allow authenticated users to create buckets if they are the owner
create policy "Users can create their own buckets"
on storage.buckets for insert
to authenticated
with check (true); -- We'll handle ownership in the application layer
-- Allow users to view buckets they own
create policy "Users can view their own buckets"
on storage.buckets for select
to authenticated
using (
owner::text = auth.uid()::text
);
--[ 11. Database Triggers ]--
-- Keep updated_at current on every row update; each trigger is dropped first
-- so the script can be re-run safely.
drop trigger if exists handle_profiles_updated_at on public.profiles;
create trigger handle_profiles_updated_at
before update on public.profiles
for each row execute function public.handle_updated_at();
drop trigger if exists handle_institute_memberships_updated_at on public.institute_memberships;
create trigger handle_institute_memberships_updated_at
before update on public.institute_memberships
for each row execute function public.handle_updated_at();
drop trigger if exists handle_membership_requests_updated_at on public.institute_membership_requests;
create trigger handle_membership_requests_updated_at
before update on public.institute_membership_requests
for each row execute function public.handle_updated_at();
-- NOTE(review): public.institutes and public.institute_imports also carry an
-- updated_at column but receive no trigger here — confirm whether that is an
-- oversight.
--[ 12. Permissions ]--
-- Grant schema access
grant usage on schema public to postgres, anon, authenticated;
-- Grant table permissions (broad blanket grants; the RLS policies in
-- section 10 still gate row visibility for authenticated)
grant all privileges on all tables in schema public to postgres;
grant select, insert, update on all tables in schema public to authenticated;
--[ 13. Realtime Setup ]--
-- Drop existing publication if it exists
drop publication if exists supabase_realtime;
-- Create publication (without IF NOT EXISTS, which publications don't support)
create publication supabase_realtime;
-- Add tables to publication (safe: the publication was just recreated empty)
alter publication supabase_realtime add table profiles;
alter publication supabase_realtime add table institute_imports;
alter publication supabase_realtime add table institutes;
alter publication supabase_realtime add table institute_memberships;
alter publication supabase_realtime add table institute_membership_requests;
alter publication supabase_realtime add table audit_logs;

View File

@ -0,0 +1 @@
main

481
.env
View File

@ -1,481 +0,0 @@
HOST_IP=localhost
## App Information
APP_NAME=ClassroomCopilot
APP_VERSION=0.0.1
APP_DESCRIPTION="An AI copilot for learners and educators."
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=classroomcopilot.ai
APP_URL_INTERNAL=classroomcopilot.ai
APP_PROTOCOL=https
APP_WS_PROTOCOL=wss
API_EXTERNAL_URL=${APP_PROTOCOL}://supa.${APP_URL}
# KevelarAI URLs
KEVLARAI_URL=kevlarai.ai
KEVLARAI_PROTOCOL=https
# Super Admin user
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
## Hosts
HOST_OLLAMA=${HOST_IP}
## Ports
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
#############################################################
## APP CONFIGURATION
#############################################################
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
SUPABASE_URL=${APP_PROTOCOL}://supa.${APP_URL}
SUPABASE_PUBLIC_URL=${APP_PROTOCOL}://supastudio.${APP_URL}
## App domains
APP_SITE_URL=${SITE_URL}
APP_SUPABASE_URL=${SUPABASE_URL}
APP_STUDIO_URL=${SUPABASE_PUBLIC_URL}
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=""
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
SMTP_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase OAuth Providers
# Azure Auth
AZURE_ENABLED=false
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## App Information
APP_NAME=ClassroomCopilot
APP_VERSION=0.0.1
APP_DESCRIPTION="An AI copilot for learners and educators."
APP_AUTHOR=KevlarAI
APP_AUTHOR_EMAIL=kcar@kevlarai.com
APP_URL=classroomcopilot.ai
APP_URL_INTERNAL=classroomcopilot.internal
APP_PROTOCOL=https
APP_WS_PROTOCOL=wss
# KevelarAI URLs
KEVLARAI_URL=kevlarai.ai
KEVLARAI_PROTOCOL=https
# Super Admin user
SUPER_ADMIN_EMAIL=admin@classroomcopilot.ai
SUPER_ADMIN_WORKER_EMAIL=kcar@kevlarai.com
SUPER_ADMIN_PASSWORD=password
SUPER_ADMIN_USERNAME=superadmin
SUPER_ADMIN_NAME="Super Admin"
SUPER_ADMIN_DISPLAY_NAME="CC Creator"
SUPER_ADMIN_CALENDAR_START_DATE=2025-01-01
SUPER_ADMIN_CALENDAR_END_DATE=2025-01-31
## Runtime settings
PROJECT_DIR=/Users/kcar/dev/ClassroomCopilot
BUILD_OS=macos
NGINX_MODE=prod
DEV_MODE=false
HOST_IP=localhost
BACKEND_DEV_MODE=false
STRICT_MODE=false
SUPER_ADMIN_CHECK=true
INIT_SUPER_ADMIN=false
## Docker compose environment variables
COMPOSE_PROJECT_NAME=classroomcopilot-${NGINX_MODE:-dev}
## Hosts
HOST_OLLAMA=${HOST_IP}
## Ports
PORT_KEYCLOAK=8080
PORT_KEYCLOAK_MANAGEMENT=9000
PORT_KEYCLOAK_SSL=8444
PORT_CC_ADMIN=5173
PORT_CC_ADMIN_DEVTOOLS=5001
PORT_SUPABASE_KONG_HTTP=8000
PORT_SUPABASE_KONG_HTTPS=8443
PORT_SUPABASE_STUDIO=3000
PORT_SUPABASE_POSTGRES=5432
# PORT_SOLID_CSS=3006 # not used currently in docker on by localhost solid server
PORT_SOLID_PROXY=3007
PORT_SOLID_PROXY_SSL=3008
PORT_NEO4J_BOLT=7687
PORT_NEO4J_HTTP=7474
PORT_NEO4J_HTTPS=7473
PORT_FRONTEND=3003
PORT_FRONTEND_SSL=3033
PORT_MARKETING_SITE=3004
PORT_MARKETING_SITE_SSL=3044
PORT_BACKEND=8880
PORT_BACKEND_SSL=8088
PORT_TLDRAW_SYNC=5002
PORT_WHISPERLIVE=5050
PORT_WHISPERLIVE_SSL=5053
PORT_TEXT_GENERATION=7861
PORT_TEXT_GENERATION_API=5010
PORT_STABLE_DIFFUSION=7860
PORT_STABLE_DIFFUSION_API=5011
PORT_OLLAMA=11434
PORT_OPEN_WEBUI=3333
PORT_OPEN_WEBUI_SSL=3334
PORT_OPENWEBUI_PROXY_INTERNAL=3335
PORT_MORPHIC=3001
PORT_REDIS=6379
PORT_SEARXNG=8090
PORT_MAILHOG_SMTP=1025
PORT_MAILHOG_WEB=8025
# WhisperLive Frontend
PORT_WHISPERLIVE_FRONTEND=5054
PORT_WHISPERLIVE_FRONTEND_SSL=5055
#############################################################
## APP CONFIGURATION
#############################################################
## Supabase Basic URLs and Endpoints
SITE_URL=${APP_PROTOCOL}://${APP_URL}
SUPABASE_URL=${APP_PROTOCOL}://supa.${APP_URL}
API_EXTERNAL_URL=${APP_PROTOCOL}://supa.${APP_URL}
SUPABASE_PUBLIC_URL=${APP_PROTOCOL}://supastudio.${APP_URL}
## App domains
APP_SITE_URL=${SITE_URL}
APP_SUPABASE_URL=${SUPABASE_URL}
APP_STUDIO_URL=${SUPABASE_PUBLIC_URL}
APP_API_URL=${APP_PROTOCOL}://api.${APP_URL}
APP_GRAPH_URL=${APP_PROTOCOL}://graph.${APP_URL}
APP_BOLT_URL=bolt://neo4j:${PORT_NEO4J_BOLT}
CC_ADMIN_URL=${APP_PROTOCOL}://admin.${APP_URL}
APP_ADMIN_API_URL=${APP_PROTOCOL}://admin-api.${APP_URL}
## Vite environment variables
VITE_APP_URL=app.${APP_URL}
#############################################################
## OAUTH2 PROXY CONFIGURATION
#############################################################
KEYCLOAK_SECRET_OPENWEBUI=XbKriIGb1YRSKmALfoKodpyJaQQOtP4U
KEYCLOAK_SECRET_ADMIN=""
COOKIE_SECRET_OPENWEBUI=QAm4ImW8ieeEftQgRly5guVYqHzcU/m+to5k5sHqfF8=
COOKIE_SECRET_ADMIN=yDaNr1DwYqRykdoeW+mS/Ari5pWs8m4YPQJsjIt2xYQ=
#############################################################
## SUPABASE CONFIGURATION
#############################################################
## Supabase Authentication Keys and Secrets
# JWT configuration
JWT_SECRET=mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc
JWT_EXPIRY=3600
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
# API Keys
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6ImFub24ifQ.utdDZzVlhYIc-cSXuC2kyZz7HN59YfyMH4eaOw1hRlk
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzM0OTg4MzkxLCJpc3MiOiJzdXBhYmFzZSIsImV4cCI6MTc2NjUyNDM5MSwicm9sZSI6InNlcnZpY2Vfcm9sZSJ9.y-HHZC_Rxr8OTOX2rmb8ZgMnwLkSJYAF_lIHjkVtAyc
## Supabase Database Configuration
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=${PORT_SUPABASE_POSTGRES}
## Supabase Dashboard Configuration
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=password
## Supabase Pooler Configuration (Database Connection Pooling)
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
## Supabase Kong API Gateway Configuration
KONG_HTTP_PORT=${PORT_SUPABASE_KONG_HTTP}
KONG_HTTPS_PORT=${PORT_SUPABASE_KONG_HTTPS}
## Supabase PostgREST Configuration
PGRST_DB_SCHEMAS=public,storage,graphql_public
## Supabase Auth Server Configuration
# General Auth Settings
ADDITIONAL_REDIRECT_URLS=""
AUTH_LOG_LEVEL=debug
DISABLE_SIGNUP=false
# Security Settings
# Uncomment these for enhanced security
# GOTRUE_SECURITY_REFRESH_TOKEN_ROTATION_ENABLED=true
# GOTRUE_SECURITY_REFRESH_TOKEN_REUSE_INTERVAL=30s
# GOTRUE_SECURITY_UPDATE_PASSWORD_REQUIRE_REAUTHENTICATION=true
# GOTRUE_PASSWORD_MIN_LENGTH=10
# GOTRUE_PASSWORD_REQUIRED_CHARACTERS=lowercase:uppercase:number:symbol
# Rate Limiting
# Uncomment these to enable rate limiting
# GOTRUE_RATE_LIMIT_HEADER=IP
# GOTRUE_RATE_LIMIT_EMAIL_SENT=4
## Supabase Email Configuration
# Mailer URL Paths
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
GOTRUE_MAILER_EXTERNAL_HOSTS="localhost,supabase.localhost"
# Email Auth Settings
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=true
SMTP_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
SMTP_HOST=smtp.zoho.eu
SMTP_PORT=587
SMTP_USER=admin@${APP_URL}
SMTP_PASS=&%Z040&%
SMTP_ADMIN_EMAIL=admin@${APP_URL}
SMTP_SENDER_NAME="Classroom Copilot"
## Supabase Phone Auth Configuration
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
## Supabase Anonymous Users
ENABLE_ANONYMOUS_USERS=false
## Supabase OAuth Providers
# Azure Auth
AZURE_ENABLED=false
AZURE_CLIENT_ID=c9a27d21-2012-44ce-9ebd-ffc868444383
AZURE_SECRET=.Nr8Q~kBXgDp_aX7~TlgCbzJHPledeTQwfTzja5y
AZURE_REDIRECT_URI=${APP_PROTOCOL}://${APP_URL}/web/auth/callback
AZURE_TENANT_ID=e637ec20-60ca-4dfc-a605-d2798f9e977b
## Supabase Studio Configuration
SUPABASE_PROJECT_ID=${APP_NAME}
STUDIO_DEFAULT_ORGANIZATION=${APP_AUTHOR}
STUDIO_DEFAULT_PROJECT=${APP_NAME}
STUDIO_PORT=${PORT_SUPABASE_STUDIO}
IMGPROXY_ENABLE_WEBP_DETECTION=true
## Supabase Functions Configuration
FUNCTIONS_VERIFY_JWT=false
## Supabase Logs Configuration
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
## Supabase Analytics Configuration (Google Cloud)
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
#############################################################
## OTHER SERVICES CONFIGURATION
#############################################################
# Neo4j Settings
USER_NEO4J=neo4j
PASSWORD_NEO4J=password
NEO4J_AUTH=${USER_NEO4J}/${PASSWORD_NEO4J}
## Keycloak Configuration
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
KEYCLOAK_DB_USER=keycloak
KEYCLOAK_DB_PASSWORD=keycloak
KEYCLOAK_DB_DATABASE=keycloak
KEYCLOAK_PORT=${PORT_KEYCLOAK}
KEYCLOAK_MANAGEMENT_PORT=${PORT_KEYCLOAK_MANAGEMENT}
KEYCLOAK_SSL_PORT=${PORT_KEYCLOAK_SSL}
KEYCLOAK_IMAGE=quay.io/keycloak/keycloak:24.0.1
KEYCLOAK_REALM=classroomcopilot
KEYCLOAK_CLIENT_ID=frontend-app
KEYCLOAK_CLIENT_SECRET=your-super-secret-and-long-keycloak-client-secret
KEYCLOAK_URL=${KEVLARAI_PROTOCOL}://keycloak.${KEVLARAI_URL}
KEYCLOAK_ADMIN_URL=${KEVLARAI_PROTOCOL}://keycloak-admin.${KEVLARAI_URL}
KEYCLOAK_INTERNAL_URL=http://keycloak:8080
## Backend
UVICORN_WORKERS=2
CORS_SITE_URL=${APP_URL}
NODE_FILESYSTEM_PATH=/node_filesystem
BACKEND_INIT_PATH=/init
LOG_PATH=/logs
# Log level must be lowercase for Node.js services using Pino logger (storage, functions)
# Valid values: trace, debug, info, warn, error, fatal
LOG_LEVEL=debug
# Whisper live settings
WHISPERLIVE_SSL=false
WHISPL_USE_CUSTOM_MODEL=false
FASTERWHISPER_MODEL=faster-whisper-large-v3
WHISPERLIVE_URL=${APP_WS_PROTOCOL}://whisperlive.${APP_URL}
## SearXNG Settings
SEARXNG_URL=${APP_PROTOCOL}://search.${APP_URL}
SEARXNG_SECRET="" # generate a secret key e.g. openssl rand -base64 32
SEARXNG_PORT=${PORT_SEARXNG} # default port
SEARXNG_BIND_ADDRESS=0.0.0.0 # default address
SEARXNG_IMAGE_PROXY=true # enable image proxy
SEARXNG_LIMITER=false # can be enabled to limit the number of requests per IP address
SEARXNG_DEFAULT_DEPTH=basic # Set to 'basic' or 'advanced', only affects SearXNG searches
SEARXNG_MAX_RESULTS=50 # Maximum number of results to return from SearXNG
SEARXNG_ENGINES=google,bing,duckduckgo,wikipedia # Search engines to use
SEARXNG_TIME_RANGE=None # Time range for search results: day, week, month, year, or None (for all time)
SEARXNG_SAFESEARCH=0 # Safe search setting: 0 (off), 1 (moderate), 2 (strict)
## Morphic Settings
NEXT_PUBLIC_BASE_URL=http://morphic:3001
USE_LOCAL_REDIS=true
LOCAL_REDIS_URL=redis://redis:6379
SEARXNG_API_URL=${APP_PROTOCOL}://search.${APP_URL}
SEARCH_API=searxng # use searxng, tavily or exa
## Notion settings
NOTION_CAPTAINS_LOG_SENDER_INTERNAL_INTEGRATION_SECRET=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
## API Keys
OPENAI_API_KEY=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
LANGCHAIN_API_KEY=ls__27405da61a724d18ba4833a0b79730e0
## Other Settings
LANGCHAIN_TRACING_V2=true
LANGCHAIN_PROJECT='LangChain Perpexity Clone with human in the loop for Classroom Copilot'
USER_AGENT='cc_user_agent'
# Google API Settings
YOUTUBE_API_KEY=AIzaSyDbpJInK6dsFUjY6oG60FlzYkj7JUJmUNs
GOOGLE_CLIENT_SECRETS_FILE=Users/kcar/ClassroomCopilot/backend/app/secrets/google_cloud_yt_credentials.json

5
.env.local Normal file
View File

@ -0,0 +1,5 @@
# Supabase Local Development Environment Variables
# This file configures the JWT issuer for local development
# Set the site URL to use HTTP (matches frontend configuration)
SITE_URL=http://localhost:8000

0
.gitignore vendored Normal file
View File

1
.temp/cli-latest Normal file
View File

@ -0,0 +1 @@
v2.58.5

View File

@ -7,7 +7,7 @@ project_id = "ClassroomCopilot"
[api] [api]
enabled = true enabled = true
# Port to use for the API URL. # Port to use for the API URL.
port = "env(PORT_SUPABASE_KONG_HTTP)" port = 8000
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API # Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default. # endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"] schemas = ["public", "graphql_public"]
@ -23,7 +23,7 @@ enabled = false
[db] [db]
# Port to use for the local database URL. # Port to use for the local database URL.
port = "env(PORT_SUPABASE_POSTGRES)" port = 5432
# Port used by db diff command to initialize the shadow database. # Port used by db diff command to initialize the shadow database.
shadow_port = 54320 shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW # The database major version to use. This has to be the same as your remote database's. Run `SHOW
@ -43,19 +43,28 @@ default_pool_size = 20
max_client_conn = 100 max_client_conn = 100
[db.vault] [db.vault]
secret_key = "env(VAULT_ENC_KEY)" #secret_key = "mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc"
[db.migrations] [db.migrations]
# Specifies an ordered list of schema files that describe your database. # Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql" # Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = ["./db/init-scripts/*.sql", "./db/migrations/*.sql"] schema_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql"
]
[db.seed] [db.seed]
# If enabled, seeds the database after migrations during a db reset. # If enabled, seeds the database after migrations during a db reset.
enabled = true enabled = true
# Specifies an ordered list of seed files to load during db reset. # Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql" # Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = ["./db/init/seed.sql"] sql_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql",
"./db/init/seed.sql"
]
[realtime] [realtime]
enabled = true enabled = true
@ -67,11 +76,11 @@ enabled = true
[studio] [studio]
enabled = true enabled = true
# Port to use for Supabase Studio. # Port to use for Supabase Studio.
port = "env(PORT_SUPABASE_STUDIO)" port = 3000
# External URL of the API server that frontend connects to. # External URL of the API server that frontend connects to.
api_url = "http://localhost" api_url = "http://localhost"
# OpenAI API Key to use for Supabase AI in the Supabase Studio. # OpenAI API Key to use for Supabase AI in the Supabase Studio.
openai_api_key = "env(OPENAI_API_KEY)" openai_api_key = "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"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they # Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface. # are monitored, and you can view the emails that would have been sent from the web interface.
@ -82,8 +91,8 @@ port = 54324
# Uncomment to expose additional ports for testing user applications that send emails. # Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325 # smtp_port = 54325
# pop3_port = 54326 # pop3_port = 54326
admin_email = "env(SUPER_ADMIN_EMAIL)" admin_email = "admin@classroomcopilot.ai"
sender_name = "env(SUPER_ADMIN_NAME)" sender_name = "Super Admin"
[storage] [storage]
enabled = true enabled = true
@ -105,9 +114,9 @@ file_size_limit = "50MiB"
enabled = true enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails. # in emails.
site_url = "env(SITE_URL)" site_url = "http://localhost:8000"
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication. # A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["env(ADDITIONAL_REDIRECT_URLS)"] additional_redirect_urls = ["http://localhost", "http://127.0.0.1"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week). # How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600 jwt_expiry = 3600
# If disabled, the refresh token will never expire. # If disabled, the refresh token will never expire.
@ -286,10 +295,10 @@ backend = "postgres"
# Configures Postgres storage engine to use OrioleDB (S3) # Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = "" orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com # Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = "env(S3_HOST)" s3_host = ""
# Configures S3 bucket region, eg. us-east-1 # Configures S3 bucket region, eg. us-east-1
s3_region = "env(S3_REGION)" s3_region = ""
# Configures AWS_ACCESS_KEY_ID for S3 bucket # Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = "env(S3_ACCESS_KEY)" s3_access_key = ""
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket # Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = "env(S3_SECRET_KEY)" s3_secret_key = ""

305
config.toml.backup Normal file
View File

@ -0,0 +1,305 @@
# For detailed configuration reference documentation, visit:
# https://supabase.com/docs/guides/local-development/cli/config
# A string used to distinguish different Supabase projects on the same host. Defaults to the
# working directory name when running `supabase init`.
project_id = "ClassroomCopilot"
[api]
enabled = true
# Port to use for the API URL.
port = 8000
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"]
# Extra schemas to add to the search_path of every request.
extra_search_path = ["public", "extensions"]
# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
# for accidental or malicious requests.
max_rows = 1000
[api.tls]
# Enable HTTPS endpoints locally using a self-signed certificate.
enabled = false
[db]
# Port to use for the local database URL.
port = 5432
# Port used by db diff command to initialize the shadow database.
shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW
# server_version;` on the remote database to check.
major_version = 15
[db.pooler]
enabled = false
# Port to use for the local connection pooler.
port = 54329
# Specifies when a server connection can be reused by other clients.
# Configure one of the supported pooler modes: `transaction`, `session`.
pool_mode = "transaction"
# How many server connections to allow per user/database pair.
default_pool_size = 20
# Maximum number of client connections allowed.
max_client_conn = 100
[db.vault]
#secret_key = "mE9FCC2YvHyrFIyyloH27F3lw51Ij93a77ejMZY-NRc"
[db.migrations]
# Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql"
]
[db.seed]
# If enabled, seeds the database after migrations during a db reset.
enabled = true
# Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = [
"./db/init-scripts/*.sql",
"./db/migrations/supabase/*.sql",
"./db/migrations/core/*.sql",
"./db/init/seed.sql"
]
[realtime]
enabled = true
# Bind realtime via either IPv4 or IPv6. (default: IPv4)
# ip_version = "IPv6"
# The maximum length in bytes of HTTP request headers. (default: 4096)
# max_header_length = 4096
[studio]
enabled = true
# Port to use for Supabase Studio.
port = 3000
# External URL of the API server that frontend connects to.
api_url = "http://localhost"
# OpenAI API Key to use for Supabase AI in the Supabase Studio.
openai_api_key = "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"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface.
[inbucket]
enabled = true
# Port to use for the email testing server web interface.
port = 54324
# Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325
# pop3_port = 54326
admin_email = "admin@classroomcopilot.ai"
sender_name = "Super Admin"
[storage]
enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"
# Image transformation API is available to Supabase Pro plan.
# [storage.image_transformation]
# enabled = true
# Uncomment to configure local storage buckets
# [storage.buckets.images]
# public = false
# file_size_limit = "50MiB"
# allowed_mime_types = ["image/png", "image/jpeg"]
# objects_path = "./images"
[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails.
site_url = "http://localhost"
# Force JWT issuer to match site_url
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["http://localhost", "http://127.0.0.1"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600
# If disabled, the refresh token will never expire.
enable_refresh_token_rotation = true
# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
# Requires enable_refresh_token_rotation = true.
refresh_token_reuse_interval = 10
# Allow/disallow new user signups to your project.
enable_signup = true
# Allow/disallow anonymous sign-ins to your project.
enable_anonymous_sign_ins = false
# Allow/disallow testing manual linking of accounts
enable_manual_linking = false
# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more.
minimum_password_length = 6
# Passwords that do not meet the following requirements will be rejected as weak. Supported values
# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols`
password_requirements = ""
# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`.
# [auth.captcha]
# enabled = true
# provider = "hcaptcha"
# secret = ""
[auth.email]
# Allow/disallow new user signups via email to your project.
enable_signup = true
# If enabled, a user will be required to confirm any email change on both the old, and new email
# addresses. If disabled, only the new email is required to confirm.
double_confirm_changes = true
# If enabled, users need to confirm their email address before signing in.
enable_confirmations = false
# If enabled, users will need to reauthenticate or have logged in recently to change their password.
secure_password_change = false
# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
max_frequency = "1s"
# Number of characters used in the email OTP.
otp_length = 6
# Number of seconds before the email OTP expires (defaults to 1 hour).
otp_expiry = 3600
# Use a production-ready SMTP server
# [auth.email.smtp]
# enabled = true
# host = "smtp.sendgrid.net"
# port = 587
# user = "apikey"
# pass = "env(SENDGRID_API_KEY)"
# admin_email = "admin@email.com"
# sender_name = "Admin"
# Uncomment to customize email template
# [auth.email.template.invite]
# subject = "You have been invited"
# content_path = "./supabase/templates/invite.html"
[auth.sms]
# Allow/disallow new user signups via SMS to your project.
enable_signup = false
# If enabled, users need to confirm their phone number before signing in.
enable_confirmations = false
# Template for sending OTP to users
template = "Your code is {{ .Code }}"
# Controls the minimum amount of time that must pass before sending another sms otp.
max_frequency = "5s"
# Use pre-defined map of phone number to OTP for testing.
# [auth.sms.test_otp]
# 4152127777 = "123456"
# Configure logged in session timeouts.
# [auth.sessions]
# Force log out after the specified duration.
# timebox = "24h"
# Force log out if the user has been inactive longer than the specified duration.
# inactivity_timeout = "8h"
# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
# [auth.hook.custom_access_token]
# enabled = true
# uri = "pg-functions://<database>/<schema>/<hook_name>"
# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
[auth.sms.twilio]
enabled = false
account_sid = ""
message_service_sid = ""
# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
# Multi-factor-authentication is available to Supabase Pro plan.
[auth.mfa]
# Control how many MFA factors can be enrolled at once per user.
max_enrolled_factors = 10
# Control MFA via App Authenticator (TOTP)
[auth.mfa.totp]
enroll_enabled = false
verify_enabled = false
# Configure MFA via Phone Messaging
[auth.mfa.phone]
enroll_enabled = false
verify_enabled = false
otp_length = 6
template = "Your code is {{ .Code }}"
max_frequency = "5s"
# Configure MFA via WebAuthn
# [auth.mfa.web_authn]
# enroll_enabled = true
# verify_enabled = true
# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
[auth.external.apple]
enabled = false
client_id = ""
# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
# Overrides the default auth redirectUrl.
redirect_uri = ""
# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
# or any other third-party OIDC providers.
url = ""
# If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
skip_nonce_check = false
# Use Firebase Auth as a third-party provider alongside Supabase Auth.
[auth.third_party.firebase]
enabled = false
# project_id = "my-firebase-project"
# Use Auth0 as a third-party provider alongside Supabase Auth.
[auth.third_party.auth0]
enabled = false
# tenant = "my-auth0-tenant"
# tenant_region = "us"
# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth.
[auth.third_party.aws_cognito]
enabled = false
# user_pool_id = "my-user-pool-id"
# user_pool_region = "us-east-1"
[edge_runtime]
enabled = true
# Configure one of the supported request policies: `oneshot`, `per_worker`.
# Use `oneshot` for hot reload, or `per_worker` for load testing.
policy = "oneshot"
# Port to attach the Chrome inspector for debugging edge functions.
inspector_port = 8083
# Use these configurations to customize your Edge Function.
# [functions.MY_FUNCTION_NAME]
# enabled = true
# verify_jwt = true
# import_map = "./functions/MY_FUNCTION_NAME/deno.json"
# Uncomment to specify a custom file path to the entrypoint.
# Supported file extensions are: .ts, .js, .mjs, .jsx, .tsx
# entrypoint = "./functions/MY_FUNCTION_NAME/index.ts"
# Specifies static files to be bundled with the function. Supports glob patterns.
# For example, if you want to serve static HTML pages in your function:
# static_files = [ "./functions/MY_FUNCTION_NAME/*.html" ]
[analytics]
enabled = true
port = 54327
# Configure one of the supported backends: `postgres`, `bigquery`.
backend = "postgres"
# Experimental features may be deprecated any time
[experimental]
# Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = ""
# Configures S3 bucket region, eg. us-east-1
s3_region = ""
# Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = ""
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = ""

View File

@ -1,31 +1,42 @@
BEGIN; BEGIN;
-- Create pg_net extension -- Create pg_net extension
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
-- Create supabase_functions schema -- Create supabase_functions schema
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; CREATE SCHEMA IF NOT EXISTS supabase_functions AUTHORIZATION postgres;
-- Grant basic permissions
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role; ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition -- supabase_functions.migrations definition
CREATE TABLE supabase_functions.migrations ( CREATE TABLE IF NOT EXISTS supabase_functions.migrations (
version text PRIMARY KEY, version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW() inserted_at timestamptz NOT NULL DEFAULT NOW()
); );
-- Initial supabase_functions migration -- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); INSERT INTO supabase_functions.migrations (version) VALUES ('initial') ON CONFLICT DO NOTHING;
-- supabase_functions.hooks definition -- supabase_functions.hooks definition
CREATE TABLE supabase_functions.hooks ( CREATE TABLE IF NOT EXISTS supabase_functions.hooks (
id bigserial PRIMARY KEY, id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL, hook_table_id integer NOT NULL,
hook_name text NOT NULL, hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(), created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint request_id bigint
); );
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); -- Create indexes if they don't exist
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX IF NOT EXISTS supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.'; COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
CREATE FUNCTION supabase_functions.http_request()
-- Create the http_request function
CREATE OR REPLACE FUNCTION supabase_functions.http_request()
RETURNS trigger RETURNS trigger
LANGUAGE plpgsql LANGUAGE plpgsql
AS $function$ AS $function$
@ -100,109 +111,21 @@ BEGIN;
RETURN NEW; RETURN NEW;
END END
$function$; $function$;
-- Supabase super admin
DO -- Set function properties
$$ ALTER FUNCTION supabase_functions.http_request() SECURITY DEFINER;
BEGIN ALTER FUNCTION supabase_functions.http_request() SET search_path = supabase_functions;
IF NOT EXISTS (
SELECT 1 -- Grant execute permissions
FROM pg_roles
WHERE rolname = 'supabase_functions_admin'
)
THEN
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
END IF;
END
$$;
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
GRANT supabase_functions_admin TO postgres;
-- Remove unused supabase_pg_net_admin role
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = 'supabase_pg_net_admin'
)
THEN
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
DROP OWNED BY supabase_pg_net_admin;
DROP ROLE supabase_pg_net_admin;
END IF;
END
$$;
-- pg_net grants when extension is already enabled
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_extension
WHERE extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END
$$;
-- Event trigger for pg_net
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_event_trigger_ddl_commands() AS ev
JOIN pg_extension AS ext
ON ev.objid = ext.oid
WHERE ext.extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END;
$$;
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_event_trigger
WHERE evtname = 'issue_pg_net_access'
) THEN
CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
EXECUTE PROCEDURE extensions.grant_pg_net_access();
END IF;
END
$$;
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC; REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role; GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
-- Grant pg_net permissions
GRANT USAGE ON SCHEMA net TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO postgres, anon, authenticated, service_role;
-- Add migration record
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants') ON CONFLICT DO NOTHING;
COMMIT; COMMIT;

View File

@ -1,5 +1,6 @@
\set jwt_secret `echo "$JWT_SECRET"` -- Set JWT configuration for the database
\set jwt_exp `echo "$JWT_EXP"` -- These settings will be configured through environment variables in the Supabase setup
ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; -- Note: JWT configuration is handled by Supabase's internal configuration
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; -- This file is kept for reference but the actual JWT settings are managed
-- through the Supabase configuration and environment variables

View File

@ -1,8 +1,11 @@
-- NOTE: change to your own passwords for production environments -- NOTE: change to your own passwords for production environments
\set pgpass `echo "$POSTGRES_PASSWORD"` -- Password configuration is handled by Supabase's internal setup
-- This file is kept for reference but the actual password settings are managed
-- through the Supabase configuration and environment variables
ALTER USER authenticator WITH PASSWORD :'pgpass'; -- The following users are created and configured by Supabase automatically:
ALTER USER pgbouncer WITH PASSWORD :'pgpass'; -- - authenticator
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; -- - pgbouncer
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass'; -- - supabase_auth_admin
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; -- - supabase_functions_admin
-- - supabase_storage_admin

0
db/init/seed.sql Normal file
View File

View File

@ -1,25 +0,0 @@
-- Create Keycloak schema if it doesn't exist
create schema if not exists keycloak;
-- Create Keycloak user if it doesn't exist
do $$
begin
if not exists (select 1 from pg_roles where rolname = 'keycloak') then
create user keycloak with password 'keycloak';
end if;
end
$$;
-- Grant schema usage and ownership to Keycloak user
alter schema keycloak owner to keycloak;
grant usage on schema keycloak to keycloak;
-- Grant all privileges on all tables in keycloak schema to keycloak user
grant all privileges on all tables in schema keycloak to keycloak;
-- Grant all privileges on all sequences in keycloak schema to keycloak user
grant all privileges on all sequences in schema keycloak to keycloak;
-- Set default privileges for future tables and sequences
alter default privileges in schema keycloak grant all on tables to keycloak;
alter default privileges in schema keycloak grant all on sequences to keycloak;

View File

@ -1,7 +1,7 @@
--[ Database Schema Version ]-- --[ Database Schema Version ]--
-- Version: 1.0.0 -- Version: 1.0.0
-- Last Updated: 2024-02-24 -- Last Updated: 2024-02-24
-- Description: Core schema setup for ClassConcepts -- Description: Core schema setup for ClassConcepts with neoFS filesystem integration
-- Dependencies: auth.users (Supabase Auth) -- Dependencies: auth.users (Supabase Auth)
--[ Validation ]-- --[ Validation ]--
@ -47,108 +47,58 @@ begin
end; end;
$$ language plpgsql security definer; $$ language plpgsql security definer;
-- Create completed_at trigger function for document artefacts
create or replace function public.set_completed_at()
returns trigger as $$
begin
if NEW.status = 'completed' and OLD.status != 'completed' then
NEW.completed_at = now();
end if;
return NEW;
end;
$$ language plpgsql security definer;
--[ 5. Core Tables ]-- --[ 5. Core Tables ]--
-- Base user profiles -- Base user profiles
create table if not exists public.profiles ( create table if not exists public.profiles (
id uuid primary key references auth.users(id) on delete cascade, id uuid primary key references auth.users(id) on delete cascade,
email text not null unique, email text not null unique,
user_type text not null check (user_type in ('admin', 'email_teacher', 'email_student')), user_type text not null check (
user_type in (
'teacher',
'student',
'email_teacher',
'email_student',
'developer',
'superadmin'
)
),
username text not null unique, username text not null unique,
full_name text, full_name text,
display_name text, display_name text,
metadata jsonb default '{}'::jsonb, metadata jsonb default '{}'::jsonb,
user_db_name text,
school_db_name text,
neo4j_sync_status text default 'pending' check (neo4j_sync_status in ('pending', 'ready', 'failed')),
neo4j_synced_at timestamp with time zone,
last_login timestamp with time zone, last_login timestamp with time zone,
created_at timestamp with time zone default timezone('utc'::text, now()), created_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now()) updated_at timestamp with time zone default timezone('utc'::text, now())
); );
comment on table public.profiles is 'User profiles linked to Supabase auth.users'; comment on table public.profiles is 'User profiles linked to Supabase auth.users';
comment on column public.profiles.user_type is 'Type of user: admin, teacher, or student'; comment on column public.profiles.user_type is 'Type of user: teacher or student';
-- Institute import data
create table if not exists public.institute_imports (
id uuid primary key default uuid_generate_v4(),
urn text unique,
establishment_name text not null,
la_code text,
la_name text,
establishment_number text,
establishment_type text,
establishment_type_group text,
establishment_status text,
reason_establishment_opened text,
open_date date,
reason_establishment_closed text,
close_date date,
phase_of_education text,
statutory_low_age integer,
statutory_high_age integer,
boarders text,
nursery_provision text,
official_sixth_form text,
gender text,
religious_character text,
religious_ethos text,
diocese text,
admissions_policy text,
school_capacity integer,
special_classes text,
census_date date,
number_of_pupils integer,
number_of_boys integer,
number_of_girls integer,
percentage_fsm numeric(5,2),
trust_school_flag text,
trusts_name text,
school_sponsor_flag text,
school_sponsors_name text,
federation_flag text,
federations_name text,
ukprn text,
fehe_identifier text,
further_education_type text,
ofsted_last_inspection date,
last_changed_date date,
street text,
locality text,
address3 text,
town text,
county text,
postcode text,
school_website text,
telephone_num text,
head_title text,
head_first_name text,
head_last_name text,
head_preferred_job_title text,
gssla_code text,
parliamentary_constituency text,
urban_rural text,
rsc_region text,
country text,
uprn text,
sen_stat boolean,
sen_no_stat boolean,
sen_unit_on_roll integer,
sen_unit_capacity integer,
resourced_provision_on_roll integer,
resourced_provision_capacity integer,
metadata jsonb default '{}'::jsonb,
imported_at timestamp with time zone default timezone('utc'::text, now()),
updated_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.institute_imports is 'Raw institute data imported from external sources';
-- Active institutes -- Active institutes
create table if not exists public.institutes ( create table if not exists public.institutes (
id uuid primary key default uuid_generate_v4(), id uuid primary key default uuid_generate_v4(),
import_id uuid references public.institute_imports(id),
name text not null, name text not null,
urn text unique, urn text unique,
status text not null default 'active' check (status in ('active', 'inactive', 'pending')), status text not null default 'active' check (status in ('active', 'inactive', 'pending')),
address jsonb default '{}'::jsonb, address jsonb default '{}'::jsonb,
website text, website text,
metadata jsonb default '{}'::jsonb, metadata jsonb default '{}'::jsonb,
neo4j_unique_id text, geo_coordinates jsonb default '{}'::jsonb,
neo4j_uuid_string text,
neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')), neo4j_public_sync_status text default 'pending' check (neo4j_public_sync_status in ('pending', 'synced', 'failed')),
neo4j_public_sync_at timestamp with time zone, neo4j_public_sync_at timestamp with time zone,
neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')), neo4j_private_sync_status text default 'not_started' check (neo4j_private_sync_status in ('not_started', 'pending', 'synced', 'failed')),
@ -157,14 +107,104 @@ create table if not exists public.institutes (
updated_at timestamp with time zone default timezone('utc'::text, now()) updated_at timestamp with time zone default timezone('utc'::text, now())
); );
comment on table public.institutes is 'Active institutes in the system'; comment on table public.institutes is 'Active institutes in the system';
comment on column public.institutes.geo_coordinates is 'Geospatial coordinates from OSM search (latitude, longitude, boundingbox)';
--[ 6. Relationship Tables ]-- --[ 6. neoFS Filesystem Tables ]--
-- File cabinets for organizing files
create table if not exists public.file_cabinets (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.file_cabinets is 'User file cabinets for organizing documents and files';
-- Files stored in cabinets
create table if not exists public.files (
id uuid primary key default uuid_generate_v4(),
cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
name text not null,
path text not null,
bucket text default 'file-cabinets' not null,
created_at timestamp with time zone default timezone('utc'::text, now()),
mime_type text,
metadata jsonb default '{}'::jsonb,
size text,
category text generated always as (
case
when mime_type like 'image/%' then 'image'
when mime_type = 'application/pdf' then 'document'
when mime_type in ('application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document') then 'document'
when mime_type in ('application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') then 'spreadsheet'
when mime_type in ('application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation') then 'presentation'
when mime_type like 'audio/%' then 'audio'
when mime_type like 'video/%' then 'video'
else 'other'
end
) stored
);
comment on table public.files is 'Files stored in user cabinets with automatic categorization';
comment on column public.files.category is 'Automatically determined file category based on MIME type';
-- AI brains for processing files
create table if not exists public.brains (
id uuid primary key default uuid_generate_v4(),
user_id uuid not null references public.profiles(id) on delete cascade,
name text not null,
purpose text,
created_at timestamp with time zone default timezone('utc'::text, now())
);
comment on table public.brains is 'AI brains for processing and analyzing user files';
-- Brain-file associations
create table if not exists public.brain_files (
brain_id uuid not null references public.brains(id) on delete cascade,
file_id uuid not null references public.files(id) on delete cascade,
primary key (brain_id, file_id)
);
comment on table public.brain_files is 'Associations between AI brains and files for processing';
-- Document artefacts from file processing
create table if not exists public.document_artefacts (
id uuid primary key default uuid_generate_v4(),
file_id uuid references public.files(id) on delete cascade,
page_number integer default 0 not null,
type text not null,
rel_path text not null,
size_tag text,
language text,
chunk_index integer,
extra jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()),
status text default 'completed' not null check (status in ('pending', 'processing', 'completed', 'failed')),
started_at timestamp with time zone default timezone('utc'::text, now()),
completed_at timestamp with time zone,
error_message text
);
comment on table public.document_artefacts is 'Extracted artefacts from document processing';
comment on column public.document_artefacts.status is 'Extraction status: pending, processing, completed, or failed';
comment on column public.document_artefacts.started_at is 'Timestamp when extraction process started';
comment on column public.document_artefacts.completed_at is 'Timestamp when extraction process completed (success or failure)';
comment on column public.document_artefacts.error_message is 'Error details if extraction failed';
-- Function execution logs
create table if not exists public.function_logs (
id serial primary key,
file_id uuid references public.files(id) on delete cascade,
timestamp timestamp with time zone default timezone('utc'::text, now()),
step text,
message text,
data jsonb
);
comment on table public.function_logs is 'Logs of function executions and processing steps';
--[ 7. Relationship Tables ]--
-- Institute memberships -- Institute memberships
create table if not exists public.institute_memberships ( create table if not exists public.institute_memberships (
id uuid primary key default uuid_generate_v4(), id uuid primary key default uuid_generate_v4(),
profile_id uuid references public.profiles(id) on delete cascade, profile_id uuid references public.profiles(id) on delete cascade,
institute_id uuid references public.institutes(id) on delete cascade, institute_id uuid references public.institutes(id) on delete cascade,
role text not null check (role in ('admin', 'teacher', 'student')), role text not null check (role in ('teacher', 'student')),
tldraw_preferences jsonb default '{}'::jsonb, tldraw_preferences jsonb default '{}'::jsonb,
metadata jsonb default '{}'::jsonb, metadata jsonb default '{}'::jsonb,
created_at timestamp with time zone default timezone('utc'::text, now()), created_at timestamp with time zone default timezone('utc'::text, now()),
@ -186,7 +226,7 @@ create table if not exists public.institute_membership_requests (
); );
comment on table public.institute_membership_requests is 'Tracks requests to join institutes'; comment on table public.institute_membership_requests is 'Tracks requests to join institutes';
--[ 7. Audit Tables ]-- --[ 8. Audit Tables ]--
-- System audit logs -- System audit logs
create table if not exists public.audit_logs ( create table if not exists public.audit_logs (
id uuid primary key default uuid_generate_v4(), id uuid primary key default uuid_generate_v4(),
@ -198,3 +238,53 @@ create table if not exists public.audit_logs (
created_at timestamp with time zone default timezone('utc'::text, now()) created_at timestamp with time zone default timezone('utc'::text, now())
); );
comment on table public.audit_logs is 'System-wide audit trail for important operations'; comment on table public.audit_logs is 'System-wide audit trail for important operations';
--[ 9. Indexes ]--
-- Document artefacts indexes
create index if not exists idx_document_artefacts_file_status on public.document_artefacts(file_id, status);
create index if not exists idx_document_artefacts_file_type on public.document_artefacts(file_id, type);
create index if not exists idx_document_artefacts_status on public.document_artefacts(status);
-- File indexes
create index if not exists idx_files_cabinet_id on public.files(cabinet_id);
create index if not exists idx_files_mime_type on public.files(mime_type);
create index if not exists idx_files_category on public.files(category);
-- Brain indexes
create index if not exists idx_brains_user_id on public.brains(user_id);
--[ 10. Triggers ]--
-- Set completed_at when document artefact status changes to completed
create trigger trigger_set_completed_at
before update on public.document_artefacts
for each row
execute function public.set_completed_at();
-- Set updated_at on profile updates
create trigger trigger_profiles_updated_at
before update on public.profiles
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute updates
create trigger trigger_institutes_updated_at
before update on public.institutes
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_memberships updates
create trigger trigger_institute_memberships_updated_at
before update on public.institute_memberships
for each row
execute function public.handle_updated_at();
-- Set updated_at on institute_membership_requests updates
create trigger trigger_institute_membership_requests_updated_at
before update on public.institute_memberships
for each row
execute function public.handle_updated_at();
--[ 11. Additional Indexes ]--
-- Index for geospatial queries
create index if not exists idx_institutes_geo_coordinates on public.institutes using gin(geo_coordinates);
create index if not exists idx_institutes_urn on public.institutes(urn);

View File

@ -1,6 +1,6 @@
--[ 8. Auth Functions ]-- --[ 8. Auth Functions ]--
-- Create a secure function to check admin status -- Create a secure function to check admin status
create or replace function auth.is_admin() create or replace function public.is_admin()
returns boolean as $$ returns boolean as $$
select coalesce( select coalesce(
(select true (select true
@ -12,32 +12,24 @@ returns boolean as $$
$$ language sql security definer; $$ language sql security definer;
-- Create a secure function to check super admin status -- Create a secure function to check super admin status
create or replace function auth.is_super_admin() create or replace function public.is_super_admin()
returns boolean as $$ returns boolean as $$
select coalesce( select coalesce(
(select role = 'supabase_admin' (select true
from auth.users from public.profiles
where id = auth.uid()), where id = auth.uid()
and user_type = 'admin'),
false false
); );
$$ language sql security definer; $$ language sql security definer;
-- Create public wrappers for the auth functions -- Create public wrapper functions
create or replace function public.is_admin() -- Note: These are now the main implementation functions, not wrappers
returns boolean as $$ -- The original auth schema functions have been moved to public schema
select auth.is_admin();
$$ language sql security definer;
create or replace function public.is_super_admin()
returns boolean as $$
select auth.is_super_admin();
$$ language sql security definer;
-- Grant execute permissions -- Grant execute permissions
grant execute on function public.is_admin to authenticated; grant execute on function public.is_admin to authenticated;
grant execute on function public.is_super_admin to authenticated; grant execute on function public.is_super_admin to authenticated;
grant execute on function auth.is_admin to authenticated;
grant execute on function auth.is_super_admin to authenticated;
-- Initial admin setup function -- Initial admin setup function
create or replace function public.setup_initial_admin(admin_email text) create or replace function public.setup_initial_admin(admin_email text)
@ -48,7 +40,7 @@ as $$
declare declare
result json; result json;
begin begin
-- Only allow this to run as service role or supabase_admin -- Only allow this to run as service role or superuser
if not ( if not (
current_user = 'service_role' current_user = 'service_role'
or exists ( or exists (
@ -84,7 +76,7 @@ $$;
-- Grant execute permissions -- Grant execute permissions
revoke execute on function public.setup_initial_admin from public; revoke execute on function public.setup_initial_admin from public;
grant execute on function public.setup_initial_admin to authenticated, service_role, supabase_admin; grant execute on function public.setup_initial_admin to authenticated, service_role;
-- Create RPC wrapper for REST API access -- Create RPC wrapper for REST API access
create or replace function rpc.setup_initial_admin(admin_email text) create or replace function rpc.setup_initial_admin(admin_email text)
@ -98,7 +90,7 @@ end;
$$; $$;
-- Grant execute permissions for RPC wrapper -- Grant execute permissions for RPC wrapper
grant execute on function rpc.setup_initial_admin to authenticated, service_role, supabase_admin; grant execute on function rpc.setup_initial_admin to authenticated, service_role;
--[ 9. Utility Functions ]-- --[ 9. Utility Functions ]--
-- Check if database is ready -- Check if database is ready

View File

@ -1,50 +1,20 @@
-- Enable RLS on storage.buckets -- Storage policies configuration for Supabase
alter table if exists storage.buckets enable row level security; -- Note: Storage bucket policies are managed by Supabase internally
-- This file provides guidance on what should be configured
-- Drop existing policies if they exist -- Storage bucket policies should be configured through:
drop policy if exists "Super admin has full access to buckets" on storage.buckets; -- 1. Supabase Dashboard > Storage > Policies
drop policy if exists "Users can create their own buckets" on storage.buckets; -- 2. Or via SQL with proper permissions (requires service_role or owner access)
drop policy if exists "Users can view their own buckets" on storage.buckets;
-- Create new policies with proper permissions -- Recommended policies for storage.buckets:
create policy "Super admin has full access to buckets" -- - Super admin has full access to buckets
on storage.buckets for all -- - Users can create their own buckets
using ( -- - Users can view their own buckets or public buckets
current_user = 'service_role'
or current_user = 'supabase_admin'
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Allow authenticated users to create buckets -- Recommended policies for storage.objects:
create policy "Users can create their own buckets" -- - Users can upload to buckets they own
on storage.buckets for insert -- - Users can view objects in public buckets
to authenticated -- - Users can manage objects in buckets they own
with check (
owner::text = auth.uid()::text
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Allow users to view buckets they own or public buckets -- Note: These policies require the service_role or appropriate permissions
create policy "Users can view their own buckets" -- to be applied to the storage schema tables
on storage.buckets for select
to authenticated
using (
owner::text = auth.uid()::text
or exists (
select 1 from public.profiles
where id = auth.uid()
and user_type = 'admin'
)
);
-- Grant necessary permissions
grant all on storage.buckets to authenticated;
grant all on storage.objects to authenticated;

View File

@ -1,31 +1,20 @@
-- Initial admin setup for ClassroomCopilot
-- This file handles basic database setup and permissions.
-- (Reconstructed: the previous text was a corrupted side-by-side diff merge
-- that duplicated statements and mixed in the removed setup_initial_admin
-- function body.)

-- Ensure uuid-ossp extension is enabled
create extension if not exists "uuid-ossp" schema extensions;

-- Grant basic permissions to authenticated users for the public schema.
-- Note: These permissions are granted to allow users to work with the application.
grant usage on schema public to authenticated;
grant all on all tables in schema public to authenticated;
grant all on all sequences in schema public to authenticated;
grant all on all functions in schema public to authenticated;

-- Set default privileges for future objects
alter default privileges in schema public grant all on tables to authenticated;
alter default privileges in schema public grant all on sequences to authenticated;
alter default privileges in schema public grant all on functions to authenticated;

-- Note: The setup_initial_admin function is defined in 62-functions-triggers.sql
-- and should be called with an admin email parameter when needed.

View File

@ -0,0 +1,95 @@
-- Files table augments and storage GC hooks

-- 1) Add columns to files if missing (guarded so the migration is re-runnable)
do $$
begin
  -- uploader profile reference
  if not exists (
    select 1 from information_schema.columns
    where table_schema='public' and table_name='files' and column_name='uploaded_by'
  ) then
    alter table public.files add column uploaded_by uuid references public.profiles(id);
  end if;
  -- object size in bytes
  if not exists (
    select 1 from information_schema.columns
    where table_schema='public' and table_name='files' and column_name='size_bytes'
  ) then
    alter table public.files add column size_bytes bigint;
  end if;
  -- origin of the upload (defaults to the web uploader)
  if not exists (
    select 1 from information_schema.columns
    where table_schema='public' and table_name='files' and column_name='source'
  ) then
    alter table public.files add column source text default 'uploader-web';
  end if;
end $$;

-- 2) Unique index for cabinet/path combo: one row per path within a cabinet
create unique index if not exists uq_files_cabinet_path on public.files(cabinet_id, path);
-- 3) Storage GC helpers (ported from neoFS with storage schema)
create or replace function public._delete_storage_objects(p_bucket text, p_path text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
if p_bucket is null or p_path is null then
return;
end if;
delete from storage.objects where bucket_id = p_bucket and name = p_path;
delete from storage.objects where bucket_id = p_bucket and name like p_path || '/%';
end
$$;
-- Trigger function: purge storage objects when a file row's bucket/path
-- changes or the row is deleted. Fired AFTER the row change, so returning
-- null is fine. NOTE: a later migration in this set redefines
-- _storage_gc_sql to also purge the file's directory prefix.
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
  if tg_op = 'DELETE' then
    perform public._delete_storage_objects(old.bucket, old.path);
  elsif tg_op = 'UPDATE' then
    -- Only clean up when the storage location actually moved.
    if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
      perform public._delete_storage_objects(old.bucket, old.path);
    end if;
  end if;
  return null;
end
$$;

-- 4) Attach GC trigger to files bucket/path changes
drop trigger if exists trg_files_gc on public.files;
create trigger trg_files_gc
after delete or update of bucket, path on public.files
for each row execute function public._storage_gc_sql();
-- 5) Document artefacts GC: remove artefact objects from storage when rows change/delete
create or replace function public._artefact_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
v_bucket text;
begin
if tg_op = 'DELETE' then
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
return old;
elsif tg_op = 'UPDATE' then
if (old.rel_path is distinct from new.rel_path) or (old.file_id is distinct from new.file_id) then
select f.bucket into v_bucket from public.files f where f.id = old.file_id;
perform public._delete_storage_objects(v_bucket, old.rel_path);
end if;
return new;
end if;
end
$$;
drop trigger if exists trg_document_artefacts_gc on public.document_artefacts;
create trigger trg_document_artefacts_gc
before delete or update of file_id, rel_path on public.document_artefacts
for each row execute function public._artefact_gc_sql();

View File

@ -1,23 +0,0 @@
-- Create Keycloak user if it doesn't exist.
-- NOTE(review): hard-coded default password 'keycloak' — acceptable for local
-- development only; must be overridden in any shared environment.
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'keycloak') THEN
    CREATE USER keycloak WITH PASSWORD 'keycloak';
  END IF;
END
$$;

-- Create Keycloak schema if it doesn't exist
CREATE SCHEMA IF NOT EXISTS keycloak;

-- Grant necessary permissions on the schema and its current objects
GRANT USAGE ON SCHEMA keycloak TO keycloak;
GRANT ALL ON ALL TABLES IN SCHEMA keycloak TO keycloak;
GRANT ALL ON ALL SEQUENCES IN SCHEMA keycloak TO keycloak;

-- Set default privileges for future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA keycloak GRANT ALL ON TABLES TO keycloak;
ALTER DEFAULT PRIVILEGES IN SCHEMA keycloak GRANT ALL ON SEQUENCES TO keycloak;

-- Grant connect permission to the database
GRANT CONNECT ON DATABASE postgres TO keycloak;

View File

@ -0,0 +1,84 @@
-- Enable RLS and define policies for filesystem tables

-- 1) Enable RLS (no-op when a table does not exist yet)
alter table if exists public.file_cabinets enable row level security;
alter table if exists public.files enable row level security;
alter table if exists public.brain_files enable row level security;
alter table if exists public.document_artefacts enable row level security;

-- 2) Cabinets: owners get full access (policy has no FOR clause, so it
--    applies to all commands).
drop policy if exists "User can access own cabinets" on public.file_cabinets;
create policy "User can access own cabinets" on public.file_cabinets
using (user_id = auth.uid())
with check (user_id = auth.uid());

-- 3) Files: access derives from ownership of the containing cabinet.
drop policy if exists "User can access files in own cabinet" on public.files;
create policy "User can access files in own cabinet" on public.files
using (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can insert files into own cabinet" on public.files;
create policy "User can insert files into own cabinet" on public.files for insert to authenticated
with check (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can update files in own cabinet" on public.files;
create policy "User can update files in own cabinet" on public.files for update to authenticated
using (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
));
drop policy if exists "User can delete files from own cabinet" on public.files;
create policy "User can delete files from own cabinet" on public.files for delete
using (exists (
  select 1 from public.file_cabinets c
  where c.id = files.cabinet_id and c.user_id = auth.uid()
));
-- 4) Brain-files: allow linking owned files to owned brains.
-- Fix: the previous policy used "with check (true)", which let any
-- authenticated user INSERT arbitrary brain/file links regardless of
-- ownership (WITH CHECK is what governs new rows). New rows must now satisfy
-- the same ownership conditions as reads.
drop policy if exists "User can link files they own to their brains" on public.brain_files;
create policy "User can link files they own to their brains" on public.brain_files
using (
  exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
  and exists (
    select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
    where f.id = brain_files.file_id and c.user_id = auth.uid()
  )
)
with check (
  exists (select 1 from public.brains b where b.id = brain_files.brain_id and b.user_id = auth.uid())
  and exists (
    select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
    where f.id = brain_files.file_id and c.user_id = auth.uid()
  )
);
-- 5) Document artefacts: owners may read via the file -> cabinet ownership
--    chain; writes go through the service role.
drop policy if exists "artefacts_read_by_owner" on public.document_artefacts;
create policy "artefacts_read_by_owner" on public.document_artefacts for select to authenticated
using (exists (
  select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
  where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));
drop policy if exists "artefacts_rw_service" on public.document_artefacts;
create policy "artefacts_rw_service" on public.document_artefacts to service_role
using (true) with check (true);

-- Allow owners to delete their artefacts (needed for cascades under RLS)
drop policy if exists "artefacts_delete_by_owner" on public.document_artefacts;
create policy "artefacts_delete_by_owner" on public.document_artefacts for delete to authenticated
using (exists (
  select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
  where f.id = document_artefacts.file_id and c.user_id = auth.uid()
));

-- File vectors RLS and policies are defined in 67-vectors.sql after the table is created

View File

@ -0,0 +1,79 @@
-- Vectors: file_vectors table and similarity search function
-- 1) Ensure pgvector extension is available
create extension if not exists vector;
-- 2) File vectors table
create table if not exists public.file_vectors (
id bigint generated by default as identity primary key,
created_at timestamp with time zone default now() not null,
embedding public.vector,
metadata jsonb,
content text
);
-- 3) ANN index (skipped until embedding dimension is fixed)
-- To enable: set column type to public.vector(<dim>) and uncomment:
-- create index if not exists file_vectors_embedding_idx
-- on public.file_vectors using ivfflat (embedding public.vector_cosine_ops)
-- with (lists='100');
-- 3b) Enable RLS and set policies (moved here to avoid ordering issues)
alter table if exists public.file_vectors enable row level security;

-- Readable when the vector is not linked to a file, or when the caller owns
-- the cabinet containing the linked file.
-- Fix: the previous predicate was
--   coalesce((metadata->>'file_id')::uuid, null) is null or ...
-- where coalesce(x, null) is a no-op and the ::uuid cast ran BEFORE the null
-- check, so any row with a malformed file_id string raised a cast error on
-- SELECT. Check the raw text for null first and only cast inside EXISTS.
drop policy if exists "vectors_read_by_owner" on public.file_vectors;
create policy "vectors_read_by_owner" on public.file_vectors for select to authenticated
using (
  (metadata->>'file_id') is null
  or exists (
    select 1 from public.files f join public.file_cabinets c on f.cabinet_id = c.id
    where f.id = (metadata->>'file_id')::uuid and c.user_id = auth.uid()
  )
);

-- Service role has unrestricted read/write access.
drop policy if exists "vectors_rw_service" on public.file_vectors;
create policy "vectors_rw_service" on public.file_vectors to service_role
using (true) with check (true);
-- 4) Match function mirrored from neoFS (generic metadata mapping).
-- Ranks rows by cosine distance (<=> operator) to query_embedding and maps
-- well-known metadata keys to typed output columns. Each filter key is only
-- applied when present in the filter jsonb.
-- NOTE(review): the output column is 'artefact_is' while the filter key is
-- 'artefact_id' — possibly an inherited typo; confirm against callers.
create or replace function public.match_file_vectors(
  filter jsonb,
  match_count integer,
  query_embedding public.vector
)
returns table (
  id bigint,
  file_id uuid,
  cabinet_id uuid,
  artefact_type text,
  artefact_is text,
  original_path_prefix text,
  original_filename text,
  content text,
  metadata jsonb,
  similarity double precision
)
language sql stable as $$
select
  fv.id,
  nullif(fv.metadata->>'file_id','')::uuid as file_id,
  nullif(fv.metadata->>'cabinet_id','')::uuid as cabinet_id,
  nullif(fv.metadata->>'artefact_type','') as artefact_type,
  nullif(fv.metadata->>'artefact_is','') as artefact_is,
  nullif(fv.metadata->>'original_path_prefix','') as original_path_prefix,
  nullif(fv.metadata->>'original_filename','') as original_filename,
  fv.content,
  fv.metadata,
  -- cosine similarity = 1 - cosine distance
  1 - (fv.embedding <=> query_embedding) as similarity
from public.file_vectors fv
where
  -- each clause is a no-op unless the corresponding filter key exists
  (coalesce(filter ? 'file_id', false) = false or (fv.metadata->>'file_id')::uuid = (filter->>'file_id')::uuid)
  and (coalesce(filter ? 'cabinet_id', false) = false or (fv.metadata->>'cabinet_id')::uuid = (filter->>'cabinet_id')::uuid)
  and (coalesce(filter ? 'artefact_type', false) = false or (fv.metadata->>'artefact_type') = (filter->>'artefact_type'))
  and (coalesce(filter ? 'artefact_id', false) = false or (fv.metadata->>'artefact_id') = (filter->>'artefact_id'))
  and (coalesce(filter ? 'original_path_prefix', false) = false or (fv.metadata->>'original_path_prefix') like (filter->>'original_path_prefix') || '%')
  and (coalesce(filter ? 'original_path_prefix_ilike', false)= false or (fv.metadata->>'original_path_prefix') ilike (filter->>'original_path_prefix_ilike') || '%')
  and (coalesce(filter ? 'original_filename', false) = false or (fv.metadata->>'original_filename') = (filter->>'original_filename'))
  and (coalesce(filter ? 'original_filename_ilike', false)= false or (fv.metadata->>'original_filename') ilike (filter->>'original_filename_ilike'))
order by fv.embedding <=> query_embedding
limit greatest(coalesce(match_count, 10), 1)
$$;

View File

@ -0,0 +1,73 @@
-- Cabinet memberships for sharing access.
-- A membership row grants a profile a role ('owner'/'editor'/'viewer') on a cabinet.
create table if not exists public.cabinet_memberships (
  id uuid default uuid_generate_v4() primary key,
  cabinet_id uuid not null references public.file_cabinets(id) on delete cascade,
  profile_id uuid not null references public.profiles(id) on delete cascade,
  role text not null check (role in ('owner','editor','viewer')),
  created_at timestamp with time zone default timezone('utc'::text, now()),
  updated_at timestamp with time zone default timezone('utc'::text, now()),
  unique(cabinet_id, profile_id)  -- at most one membership per profile per cabinet
);
create index if not exists idx_cabinet_memberships_cabinet on public.cabinet_memberships(cabinet_id);
create index if not exists idx_cabinet_memberships_profile on public.cabinet_memberships(profile_id);

-- Updated at trigger
drop trigger if exists trg_cabinet_memberships_updated_at on public.cabinet_memberships;
create trigger trg_cabinet_memberships_updated_at
before update on public.cabinet_memberships
for each row execute function public.handle_updated_at();

-- RLS and policies
alter table if exists public.cabinet_memberships enable row level security;

-- Members can select their own memberships; cabinet owners can also see memberships
drop policy if exists cm_read_self_or_owner on public.cabinet_memberships;
create policy cm_read_self_or_owner on public.cabinet_memberships for select to authenticated
using (
  profile_id = auth.uid() or exists (
    select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
  )
);

-- Cabinet owners can insert memberships
drop policy if exists cm_insert_by_owner on public.cabinet_memberships;
create policy cm_insert_by_owner on public.cabinet_memberships for insert to authenticated
with check (exists (
  select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));

-- Cabinet owners can update memberships (e.g., role)
drop policy if exists cm_update_by_owner on public.cabinet_memberships;
create policy cm_update_by_owner on public.cabinet_memberships for update to authenticated
using (exists (
  select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
))
with check (exists (
  select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));

-- Cabinet owners can delete memberships
drop policy if exists cm_delete_by_owner on public.cabinet_memberships;
create policy cm_delete_by_owner on public.cabinet_memberships for delete to authenticated
using (exists (
  select 1 from public.file_cabinets c where c.id = cabinet_memberships.cabinet_id and c.user_id = auth.uid()
));

-- Extend read access to cabinets/files for members (after the membership
-- table exists, to avoid forward references).
drop policy if exists "User can access cabinets via membership" on public.file_cabinets;
create policy "User can access cabinets via membership" on public.file_cabinets for select to authenticated
using (exists (
  select 1 from public.cabinet_memberships m
  where m.cabinet_id = file_cabinets.id and m.profile_id = auth.uid()
));
drop policy if exists "User can access files via membership" on public.files;
create policy "User can access files via membership" on public.files for select to authenticated
using (exists (
  select 1 from public.cabinet_memberships m
  where m.cabinet_id = files.cabinet_id and m.profile_id = auth.uid()
));

View File

@ -0,0 +1,48 @@
-- Ensure storage objects for all artefacts are removed when a file is deleted
-- by deleting the entire "cabinet_id/file_id" directory prefix in Storage.

-- Helper to delete all objects under a prefix
create or replace function public._delete_storage_prefix(p_bucket text, p_prefix text)
returns void
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
begin
  if p_bucket is null or p_prefix is null then
    return;
  end if;
  -- Delete any objects whose name starts with the prefix + '/'
  delete from storage.objects where bucket_id = p_bucket and name like p_prefix || '/%';
  -- In case an object exists exactly at the prefix (rare but safe)
  delete from storage.objects where bucket_id = p_bucket and name = p_prefix;
end
$$;

-- Update file-level GC to also delete the parent directory prefix (cabinet_id/file_id).
-- This redefinition replaces the earlier _storage_gc_sql in this migration set;
-- the existing trg_files_gc trigger picks it up automatically.
create or replace function public._storage_gc_sql()
returns trigger
language plpgsql security definer
set search_path to 'public', 'storage'
as $$
declare
  v_prefix text;  -- directory that held the file and its artefacts
begin
  -- Derive directory prefix from the file path by removing the last segment (filename)
  -- Example: 'cabinet_id/file_id/filename.ext' -> 'cabinet_id/file_id'
  v_prefix := regexp_replace(old.path, '/[^/]+$', '');
  if tg_op = 'DELETE' then
    -- Delete the original object and any artefacts under the file's directory
    perform public._delete_storage_objects(old.bucket, old.path);
    perform public._delete_storage_prefix(old.bucket, v_prefix);
  elsif tg_op = 'UPDATE' then
    if (old.bucket is distinct from new.bucket) or (old.path is distinct from new.path) then
      perform public._delete_storage_objects(old.bucket, old.path);
      perform public._delete_storage_prefix(old.bucket, v_prefix);
    end if;
  end if;
  return null;
end
$$;

View File

@ -0,0 +1,41 @@
-- Add directory support to files table
-- Migration: Add directory support for folder uploads
-- NOTE(review): statements reference 'files' without a schema qualifier,
-- unlike the other migrations which use public.files — confirm search_path.

-- Add new columns to files table
ALTER TABLE files
ADD COLUMN IF NOT EXISTS is_directory BOOLEAN DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS parent_directory_id UUID REFERENCES files(id) ON DELETE CASCADE,
ADD COLUMN IF NOT EXISTS relative_path TEXT,
ADD COLUMN IF NOT EXISTS directory_manifest JSONB,
ADD COLUMN IF NOT EXISTS upload_session_id UUID,
ADD COLUMN IF NOT EXISTS processing_status TEXT DEFAULT 'uploaded' CHECK (processing_status IN ('uploaded', 'processing', 'completed', 'failed', 'queued'));

-- Create indexes for efficient directory queries
CREATE INDEX IF NOT EXISTS idx_files_parent_directory ON files(parent_directory_id);
CREATE INDEX IF NOT EXISTS idx_files_upload_session ON files(upload_session_id);
CREATE INDEX IF NOT EXISTS idx_files_processing_status ON files(processing_status);
CREATE INDEX IF NOT EXISTS idx_files_is_directory ON files(is_directory);

-- Document the new columns
COMMENT ON COLUMN files.is_directory IS 'True if this record represents a directory/folder';
COMMENT ON COLUMN files.parent_directory_id IS 'ID of parent directory if this file is inside an uploaded folder';
COMMENT ON COLUMN files.relative_path IS 'Relative path within the uploaded directory structure';
COMMENT ON COLUMN files.directory_manifest IS 'JSON manifest of directory contents including file count, total size, structure';
COMMENT ON COLUMN files.upload_session_id IS 'Groups files uploaded together in a single directory upload session';
COMMENT ON COLUMN files.processing_status IS 'Simple status tracking without auto-processing';

-- Example directory_manifest structure:
-- {
--   "total_files": 15,
--   "total_size_bytes": 12345678,
--   "directory_structure": {
--     "documents/": {
--       "file1.pdf": {"size": 123456, "mime_type": "application/pdf"},
--       "subdirectory/": {
--         "file2.docx": {"size": 234567, "mime_type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document"}
--       }
--     }
--   },
--   "upload_timestamp": "2024-09-23T12:00:00Z",
--   "upload_method": "directory_picker"
-- }

View File

@ -1,3 +1,7 @@
-- Create _supabase database for internal Supabase operations.
-- This file is kept for reference; the actual database creation is managed
-- through the Supabase configuration and environment variables.
\set pguser `echo "$POSTGRES_USER"`
CREATE DATABASE _supabase WITH OWNER :pguser;
-- Note: the _supabase database is created with the postgres user as owner
-- by default during Supabase initialization.

View File

@ -1,5 +1,7 @@
-- Create _analytics schema for Supabase analytics.
-- This file is kept for reference; the actual schema creation is managed
-- through the Supabase configuration and environment variables.
\set pguser `echo "$POSTGRES_USER"`
-- Note: the _analytics schema lives in the _supabase database.
\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;

View File

@ -1,5 +1,7 @@
-- Create _supavisor schema for Supabase connection pooling.
-- This file is kept for reference; the actual schema creation is managed
-- through the Supabase configuration and environment variables.
\set pguser `echo "$POSTGRES_USER"`
-- Note: the _supavisor schema lives in the _supabase database.
\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;

View File

@ -1,4 +1,7 @@
-- Create _realtime schema for Supabase realtime functionality.
-- This file is kept for reference; the actual schema creation is managed
-- through the Supabase configuration and environment variables.
\set pguser `echo "$POSTGRES_USER"`
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;

View File

@ -0,0 +1,16 @@
# Supabase Edge Functions
This document describes the available Edge Functions in this self-hosted Supabase instance.
## institute-geocoder
Institute address geocoding using SearXNG/OpenStreetMap
**Endpoints:**
- `/functions/v1/institute-geocoder`
- `/functions/v1/institute-geocoder/batch`
**Usage:** POST with institute_id and optional address data
**Dependencies:** SearXNG service, OpenStreetMap data

View File

@ -0,0 +1,391 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response; origin is wide open so the
// function can be invoked from any frontend.
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}

// Request body for a batch geocoding run.
interface BatchGeocodingRequest {
  limit?: number            // max institutes to process (default 10)
  force_refresh?: boolean   // re-geocode even when coordinates already exist
  institute_ids?: string[]  // restrict the batch to these institutes
}

// Per-institute outcome reported back to the caller.
interface GeocodingResult {
  institute_id: string
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}
// HTTP entry point: geocodes a batch of institutes via SearXNG and writes the
// resulting coordinates back to the institutes table, logging progress to
// function_logs. Institutes are processed sequentially with a small delay to
// avoid overwhelming the SearXNG service.
serve(async (req: Request) => {
  // Handle CORS preflight requests
  if (req.method === 'OPTIONS') {
    return new Response('ok', { headers: corsHeaders })
  }
  try {
    // Get environment variables
    const supabaseUrl = Deno.env.get('SUPABASE_URL')
    const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
    const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
    if (!supabaseUrl || !supabaseServiceKey) {
      throw new Error('Missing required environment variables')
    }
    // Create Supabase client (service-role key: updates bypass RLS)
    const supabase = createClient(supabaseUrl, supabaseServiceKey)
    // Parse request body
    const body: BatchGeocodingRequest = await req.json()
    const limit = body.limit || 10
    const forceRefresh = body.force_refresh || false
    // Get institutes that need geocoding (only imported ones)
    let query = supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .not('import_id', 'is', null)
    if (!forceRefresh) {
      // Only get institutes without coordinates or with empty coordinates
      query = query.or('geo_coordinates.is.null,geo_coordinates.eq.{}')
    }
    if (body.institute_ids && body.institute_ids.length > 0) {
      query = query.in('id', body.institute_ids)
    }
    const { data: institutes, error: fetchError } = await query.limit(limit)
    if (fetchError) {
      throw new Error(`Failed to fetch institutes: ${fetchError.message}`)
    }
    if (!institutes || institutes.length === 0) {
      return new Response(
        JSON.stringify({
          success: true,
          message: 'No institutes found that need geocoding',
          processed: 0
        }),
        {
          status: 200,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    console.log(`Processing ${institutes.length} institutes for geocoding`)
    const results: GeocodingResult[] = []
    let successCount = 0
    let errorCount = 0
    // Process institutes sequentially to avoid overwhelming the SearXNG service
    let processedCount = 0
    for (const institute of institutes) {
      try {
        const address = institute.address as any
        if (!address) {
          // No address at all: record the failure and move on
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No address information available',
            error: 'Missing address data'
          })
          errorCount++
          processedCount++
          continue
        }
        // Build search query from address components (drops empty parts)
        const addressParts = [
          address.street,
          address.town,
          address.county,
          address.postcode,
          address.country
        ].filter(Boolean)
        if (addressParts.length === 0) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No valid address components found',
            error: 'Empty address parts'
          })
          errorCount++
          processedCount++
          continue
        }
        const searchQuery = addressParts.join(', ')
        console.log(`Geocoding institute ${institute.id}: ${searchQuery}`)
        // Query SearXNG for geocoding with fallback strategy
        const geocodingResult = await geocodeAddressWithFallback(address, searxngUrl)
        if (geocodingResult.success && geocodingResult.coordinates) {
          // Update institute with geospatial coordinates.
          // NOTE(review): search_query records the full joined address even
          // when a shorter fallback query produced the hit — confirm intended.
          const { error: updateError } = await supabase
            .from('institutes')
            .update({
              geo_coordinates: {
                latitude: geocodingResult.coordinates.latitude,
                longitude: geocodingResult.coordinates.longitude,
                boundingbox: geocodingResult.coordinates.boundingbox,
                geojson: geocodingResult.coordinates.geojson,
                osm: geocodingResult.coordinates.osm,
                search_query: searchQuery,
                geocoded_at: new Date().toISOString()
              }
            })
            .eq('id', institute.id)
          if (updateError) {
            throw new Error(`Failed to update institute: ${updateError.message}`)
          }
          results.push({
            institute_id: institute.id,
            success: true,
            message: 'Successfully geocoded',
            coordinates: geocodingResult.coordinates
          })
          successCount++
          // Log the successful geocoding
          await supabase
            .from('function_logs')
            .insert({
              file_id: null,
              step: 'batch_geocoding',
              message: 'Successfully geocoded institute address in batch',
              data: {
                institute_id: institute.id,
                search_query: searchQuery,
                coordinates: geocodingResult.coordinates
              }
            })
        } else {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'Geocoding failed',
            error: geocodingResult.error || 'Unknown error'
          })
          errorCount++
        }
        processedCount++
        // Add a small delay between requests to be respectful to the SearXNG service
        // Optimize delay based on batch size for better performance
        if (processedCount < institutes.length) { // Don't delay after the last institute
          const delay = institutes.length > 200 ? 50 : 100; // Faster processing for large batches
          await new Promise(resolve => setTimeout(resolve, delay))
        }
      } catch (error) {
        // Per-institute failures are recorded but don't abort the batch
        console.error(`Error processing institute ${institute.id}:`, error)
        results.push({
          institute_id: institute.id,
          success: false,
          message: 'Processing error',
          error: error.message
        })
        errorCount++
      }
    }
    // Log the batch operation
    await supabase
      .from('function_logs')
      .insert({
        file_id: null,
        step: 'batch_geocoding_complete',
        message: 'Batch geocoding operation completed',
        data: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount,
          results: results
        }
      })
    return new Response(
      JSON.stringify({
        success: true,
        message: 'Batch geocoding completed',
        summary: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount
        },
        results: results
      }),
      {
        status: 200,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  } catch (error) {
    // Top-level failure (bad body, missing env, fetch error before the loop)
    console.error('Error in batch institute geocoder:', error)
    return new Response(
      JSON.stringify({
        error: 'Internal server error',
        details: error.message
      }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  }
})
// Geocode a free-text address via SearXNG's OSM bang search.
// Resolves to { success: true, coordinates } on a hit, or
// { success: false, error } for any failure (never rejects).
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  try {
    // Use the !osm bang so SearXNG routes the query to OpenStreetMap
    const osmQuery = `!osm ${searchQuery}`
    const searchUrl = `${searxngUrl}/search?q=${encodeURIComponent(osmQuery)}&format=json`
    const response = await fetch(searchUrl, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
        'User-Agent': 'ClassroomCopilot-BatchGeocoder/1.0'
      }
    })
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const data = await response.json()
    // number_of_results can be unreliable, so inspect the results array itself
    const firstHit = data.results && data.results.length > 0 ? data.results[0] : undefined
    if (!firstHit) {
      return { success: false, error: 'No results returned from SearXNG' }
    }
    if (!firstHit.latitude || !firstHit.longitude) {
      return { success: false, error: 'Missing latitude or longitude in SearXNG response' }
    }
    return {
      success: true,
      coordinates: {
        latitude: parseFloat(firstHit.latitude),
        longitude: parseFloat(firstHit.longitude),
        boundingbox: firstHit.boundingbox || [],
        geojson: firstHit.geojson,
        osm: firstHit.osm
      }
    }
  } catch (error) {
    console.error('Geocoding error:', error)
    return { success: false, error: error.message }
  }
}
// Try progressively less specific address queries until one geocodes.
// Fix: the original tried postcode-only (strategy 3) BEFORE town + postcode
// (strategy 4). A coarse postcode-centroid hit would "succeed" and preempt
// the more specific town + postcode query, degrading precision. Strategies
// now run strictly most-specific -> least-specific.
async function geocodeAddressWithFallback(address: any, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  // Strategy 1: Try full address (street + town + county + postcode)
  if (address.street && address.town && address.county && address.postcode) {
    const fullQuery = `${address.street}, ${address.town}, ${address.county}, ${address.postcode}`
    console.log(`Trying full address: ${fullQuery}`)
    const result = await geocodeAddress(fullQuery, searxngUrl)
    if (result.success && result.coordinates) {
      console.log('Full address geocoding successful')
      return result
    }
  }
  // Strategy 2: Try town + county + postcode
  if (address.town && address.county && address.postcode) {
    const mediumQuery = `${address.town}, ${address.county}, ${address.postcode}`
    console.log(`Trying medium address: ${mediumQuery}`)
    const result = await geocodeAddress(mediumQuery, searxngUrl)
    if (result.success && result.coordinates) {
      console.log('Medium address geocoding successful')
      return result
    }
  }
  // Strategy 3: Try town + postcode (more specific than postcode alone)
  if (address.town && address.postcode) {
    const simpleQuery = `${address.town}, ${address.postcode}`
    console.log(`Trying simple address: ${simpleQuery}`)
    const result = await geocodeAddress(simpleQuery, searxngUrl)
    if (result.success && result.coordinates) {
      console.log('Simple address geocoding successful')
      return result
    }
  }
  // Strategy 4: Try just postcode (least specific, last resort)
  if (address.postcode) {
    console.log(`Trying postcode only: ${address.postcode}`)
    const result = await geocodeAddress(address.postcode, searxngUrl)
    if (result.success && result.coordinates) {
      console.log('Postcode geocoding successful')
      return result
    }
  }
  // All strategies failed
  return {
    success: false,
    error: 'No coordinates found with any address combination'
  }
}

View File

@ -0,0 +1,317 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response so the function can be invoked
// from browser clients on any origin.
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
// Request body accepted by the batch geocoding endpoint.
interface BatchGeocodingRequest {
  limit?: number            // max institutes to process per invocation (handler defaults to 10)
  force_refresh?: boolean   // when true, also re-geocode rows that already have coordinates
  institute_ids?: string[]  // optional explicit subset of institute ids to process
}
// Per-institute outcome reported back to the caller.
interface GeocodingResult {
  institute_id: string
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]   // bounding-box strings from the OSM result — ordering not verified here
    geojson?: any
    osm?: any
  }
  error?: string
}
/**
 * Batch institute geocoder (Supabase Edge Function).
 *
 * Loads institutes that still need coordinates (or an explicit id list /
 * force-refreshed set), geocodes each stored address via SearXNG, writes the
 * coordinates back to `institutes.geo_coordinates`, and records the outcome
 * in `function_logs`. Institutes are processed sequentially with a short
 * delay to avoid hammering the SearXNG instance.
 */
serve(async (req: Request) => {
  // Handle CORS preflight requests
  if (req.method === 'OPTIONS') {
    return new Response('ok', { headers: corsHeaders })
  }
  try {
    // Get environment variables
    const supabaseUrl = Deno.env.get('SUPABASE_URL')
    // FIX: this previously read 'SUPABASE_SERVICE_RATE_KEY' — a typo. The
    // provisioned variable (and the one the companion single-institute
    // geocoder reads) is SUPABASE_SERVICE_ROLE_KEY; with the typo every
    // invocation failed the "missing environment variables" check below.
    const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
    const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
    if (!supabaseUrl || !supabaseServiceKey) {
      throw new Error('Missing required environment variables')
    }
    // Create Supabase client (service-role key so the updates below bypass RLS)
    const supabase = createClient(supabaseUrl, supabaseServiceKey)
    // Parse request body
    const body: BatchGeocodingRequest = await req.json()
    const limit = body.limit || 10
    const forceRefresh = body.force_refresh || false
    // Get institutes that need geocoding (only imported rows are considered)
    let query = supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .not('import_id', 'is', null)
    if (!forceRefresh) {
      // Only get institutes without coordinates or with empty coordinates
      query = query.or('geo_coordinates.is.null,geo_coordinates.eq.{}')
    }
    if (body.institute_ids && body.institute_ids.length > 0) {
      query = query.in('id', body.institute_ids)
    }
    const { data: institutes, error: fetchError } = await query.limit(limit)
    if (fetchError) {
      throw new Error(`Failed to fetch institutes: ${fetchError.message}`)
    }
    if (!institutes || institutes.length === 0) {
      return new Response(
        JSON.stringify({
          success: true,
          message: 'No institutes found that need geocoding',
          processed: 0
        }),
        {
          status: 200,
          headers: { ...corsHeaders, 'Content-Type': 'application/json' }
        }
      )
    }
    console.log(`Processing ${institutes.length} institutes for geocoding`)
    const results: GeocodingResult[] = []
    let successCount = 0
    let errorCount = 0
    // Process institutes sequentially to avoid overwhelming the SearXNG service
    for (const institute of institutes) {
      try {
        const address = institute.address as any
        if (!address) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No address information available',
            error: 'Missing address data'
          })
          errorCount++
          continue
        }
        // Build search query from address components
        const addressParts = [
          address.street,
          address.town,
          address.county,
          address.postcode,
          address.country
        ].filter(Boolean)
        if (addressParts.length === 0) {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'No valid address components found',
            error: 'Empty address parts'
          })
          errorCount++
          continue
        }
        const searchQuery = addressParts.join(', ')
        console.log(`Geocoding institute ${institute.id}: ${searchQuery}`)
        // Query SearXNG for geocoding
        const geocodingResult = await geocodeAddress(searchQuery, searxngUrl)
        if (geocodingResult.success && geocodingResult.coordinates) {
          // Update institute with geospatial coordinates
          const { error: updateError } = await supabase
            .from('institutes')
            .update({
              geo_coordinates: {
                latitude: geocodingResult.coordinates.latitude,
                longitude: geocodingResult.coordinates.longitude,
                boundingbox: geocodingResult.coordinates.boundingbox,
                geojson: geocodingResult.coordinates.geojson,
                osm: geocodingResult.coordinates.osm,
                search_query: searchQuery,
                geocoded_at: new Date().toISOString()
              }
            })
            .eq('id', institute.id)
          if (updateError) {
            throw new Error(`Failed to update institute: ${updateError.message}`)
          }
          results.push({
            institute_id: institute.id,
            success: true,
            message: 'Successfully geocoded',
            coordinates: geocodingResult.coordinates
          })
          successCount++
          // Log the successful geocoding
          await supabase
            .from('function_logs')
            .insert({
              file_id: null,
              step: 'batch_geocoding',
              message: 'Successfully geocoded institute address in batch',
              data: {
                institute_id: institute.id,
                search_query: searchQuery,
                coordinates: geocodingResult.coordinates
              }
            })
        } else {
          results.push({
            institute_id: institute.id,
            success: false,
            message: 'Geocoding failed',
            error: geocodingResult.error || 'Unknown error'
          })
          errorCount++
        }
        // Add a small delay between requests to be respectful to the SearXNG service
        await new Promise(resolve => setTimeout(resolve, 100))
      } catch (error) {
        console.error(`Error processing institute ${institute.id}:`, error)
        results.push({
          institute_id: institute.id,
          success: false,
          message: 'Processing error',
          error: error.message
        })
        errorCount++
      }
    }
    // Log the batch operation
    await supabase
      .from('function_logs')
      .insert({
        file_id: null,
        step: 'batch_geocoding_complete',
        message: 'Batch geocoding operation completed',
        data: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount,
          results: results
        }
      })
    return new Response(
      JSON.stringify({
        success: true,
        message: 'Batch geocoding completed',
        summary: {
          total_processed: institutes.length,
          successful: successCount,
          failed: errorCount
        },
        results: results
      }),
      {
        status: 200,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  } catch (error) {
    console.error('Error in batch institute geocoder:', error)
    return new Response(
      JSON.stringify({
        error: 'Internal server error',
        details: error.message
      }),
      {
        status: 500,
        headers: { ...corsHeaders, 'Content-Type': 'application/json' }
      }
    )
  }
})
/**
 * Resolve a free-text address to coordinates through SearXNG's "!osm" bang.
 *
 * Returns { success: true, coordinates } when the first result carries a
 * latitude/longitude pair, otherwise { success: false, error }. Network or
 * HTTP failures are caught and reported as a failure result rather than
 * thrown.
 */
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<{
  success: boolean
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    geojson?: any
    osm?: any
  }
  error?: string
}> {
  try {
    // "!osm" routes the query to OpenStreetMap via SearXNG.
    const requestUrl = `${searxngUrl}/search?q=${encodeURIComponent(`!osm ${searchQuery}`)}&format=json`
    const response = await fetch(requestUrl, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
        'User-Agent': 'ClassroomCopilot-BatchGeocoder/1.0'
      }
    })
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const payload = await response.json()
    // number_of_results can be unreliable, so inspect the results array itself.
    const matches = payload.results
    if (!matches || matches.length === 0) {
      return {
        success: false,
        error: 'No results returned from SearXNG'
      }
    }
    const [top] = matches
    if (!top.latitude || !top.longitude) {
      return {
        success: false,
        error: 'Missing latitude or longitude in SearXNG response'
      }
    }
    return {
      success: true,
      coordinates: {
        latitude: parseFloat(top.latitude),
        longitude: parseFloat(top.longitude),
        boundingbox: top.boundingbox || [],
        geojson: top.geojson,
        osm: top.osm
      }
    }
  } catch (error) {
    console.error('Geocoding error:', error)
    return {
      success: false,
      error: error.message
    }
  }
}

View File

@ -0,0 +1,315 @@
// Example usage of Institute Geocoder functions
// This file demonstrates how to integrate the geocoding functions in your frontend
import { createClient } from '@supabase/supabase-js'
// Initialize Supabase client
// NOTE(review): the non-null assertions assume both NEXT_PUBLIC_* variables
// are set at build time; if either is missing, createClient receives
// undefined and fails at runtime — confirm the deployment sets them.
const supabaseUrl = process.env.NEXT_PUBLIC_SUPABASE_URL!
const supabaseAnonKey = process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!
const supabase = createClient(supabaseUrl, supabaseAnonKey)
// Types for institute data
// Row shape read from the `institutes` table; address and geo_coordinates
// are JSON columns, and geo_coordinates is absent until geocoding succeeds.
interface Institute {
  id: string
  name: string
  address: {
    street?: string
    town?: string
    county?: string
    postcode?: string
    country?: string
  }
  geo_coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
    search_query: string   // the query string that produced these coordinates
    geocoded_at: string    // ISO timestamp written by the geocoder function
  }
}
// Envelope returned by the `institute-geocoder` edge function.
interface GeocodingResult {
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]
  }
  error?: string
}
// 1. Geocode a single institute
/**
 * Invoke the `institute-geocoder` edge function for one institute and
 * return its GeocodingResult. Invocation errors are caught and converted
 * into a failure result rather than thrown.
 */
export async function geocodeInstitute(instituteId: string): Promise<GeocodingResult> {
  try {
    const invocation = await supabase.functions.invoke('institute-geocoder', {
      body: { institute_id: instituteId }
    })
    if (invocation.error) {
      throw new Error(invocation.error.message)
    }
    return invocation.data
  } catch (error) {
    console.error('Geocoding failed:', error)
    return {
      success: false,
      message: 'Geocoding failed',
      error: error instanceof Error ? error.message : 'Unknown error'
    }
  }
}
// 2. Batch geocode multiple institutes
/**
 * Invoke the batch endpoint of the `institute-geocoder` edge function.
 * Unlike geocodeInstitute, failures are logged and then rethrown so the
 * caller can react (e.g. show an error state).
 */
export async function batchGeocodeInstitutes(
  limit: number = 10,
  forceRefresh: boolean = false
): Promise<any> {
  try {
    const invocation = await supabase.functions.invoke('institute-geocoder/batch', {
      body: {
        limit,
        force_refresh: forceRefresh
      }
    })
    if (invocation.error) {
      throw new Error(invocation.error.message)
    }
    return invocation.data
  } catch (error) {
    console.error('Batch geocoding failed:', error)
    throw error
  }
}
// 3. Get institutes that need geocoding
/**
 * Fetch imported institutes whose geo_coordinates are null or empty.
 * Returns an empty list (and logs) on any query error.
 */
export async function getInstitutesNeedingGeocoding(): Promise<Institute[]> {
  try {
    const response = await supabase
      .from('institutes')
      .select('id, name, address, geo_coordinates')
      .or('geo_coordinates.is.null,geo_coordinates.eq.{}')
      .not('import_id', 'is', null)
    if (response.error) {
      throw new Error(response.error.message)
    }
    return response.data ?? []
  } catch (error) {
    console.error('Failed to fetch institutes:', error)
    return []
  }
}
// 4. Display institute on a map (example with Leaflet)
/**
 * Log where an institute would be rendered. Placeholder only — wire up a
 * real map library (Leaflet, Mapbox, Google Maps) using mapElement.
 */
export function displayInstituteOnMap(
  institute: Institute,
  mapElement: HTMLElement
): void {
  const coords = institute.geo_coordinates
  if (!coords) {
    console.warn('Institute has no coordinates:', institute.name)
    return
  }
  console.log(`Displaying ${institute.name} at ${coords.latitude}, ${coords.longitude}`)
  // Example map implementation:
  // const map = L.map(mapElement).setView([latitude, longitude], 13)
  // L.marker([latitude, longitude]).addTo(map).bindPopup(institute.name)
}
// 5. React component example
// NOTE(review): this component uses useState/useEffect and JSX, but the file
// never imports React or the hooks — add
//   import { useState, useEffect } from 'react'
// (and keep this in a .tsx module) before using this example as-is.
export function InstituteGeocoder() {
  const [institutes, setInstitutes] = useState<Institute[]>([])
  const [loading, setLoading] = useState(false)
  const [geocodingProgress, setGeocodingProgress] = useState(0)
  // Load institutes that need geocoding
  useEffect(() => {
    loadInstitutes()
  }, [])
  // Refresh the list of institutes still missing coordinates.
  async function loadInstitutes() {
    const data = await getInstitutesNeedingGeocoding()
    setInstitutes(data)
  }
  // Geocode all institutes
  // NOTE(review): progress jumps straight 0 → 100; there is no per-item
  // progress reporting from the batch endpoint.
  async function geocodeAllInstitutes() {
    setLoading(true)
    setGeocodingProgress(0)
    try {
      const result = await batchGeocodeInstitutes(institutes.length, false)
      if (result.success) {
        setGeocodingProgress(100)
        // Reload institutes to show updated coordinates
        await loadInstitutes()
      }
    } catch (error) {
      console.error('Batch geocoding failed:', error)
    } finally {
      setLoading(false)
    }
  }
  // Geocode single institute
  async function geocodeSingleInstitute(instituteId: string) {
    try {
      const result = await geocodeInstitute(instituteId)
      if (result.success) {
        // Reload institutes to show updated coordinates
        await loadInstitutes()
      }
    } catch (error) {
      console.error('Single geocoding failed:', error)
    }
  }
  return (
    <div className="institute-geocoder">
      <h2>Institute Geocoding</h2>
      <div className="controls">
        <button
          onClick={geocodeAllInstitutes}
          disabled={loading || institutes.length === 0}
        >
          {loading ? 'Geocoding...' : `Geocode All (${institutes.length})`}
        </button>
        {loading && (
          <div className="progress">
            <div
              className="progress-bar"
              style={{ width: `${geocodingProgress}%` }}
            />
          </div>
        )}
      </div>
      <div className="institutes-list">
        {institutes.map(institute => (
          <div key={institute.id} className="institute-item">
            <h3>{institute.name}</h3>
            <p>
              {institute.address.street && `${institute.address.street}, `}
              {institute.address.town && `${institute.address.town}, `}
              {institute.address.county && `${institute.address.county}, `}
              {institute.address.postcode}
            </p>
            {institute.geo_coordinates ? (
              <div className="coordinates">
                <span>📍 {institute.geo_coordinates.latitude}, {institute.geo_coordinates.longitude}</span>
                <span>Geocoded: {new Date(institute.geo_coordinates.geocoded_at).toLocaleDateString()}</span>
              </div>
            ) : (
              <button
                onClick={() => geocodeSingleInstitute(institute.id)}
                disabled={loading}
              >
                Geocode
              </button>
            )}
          </div>
        ))}
      </div>
    </div>
  )
}
// 6. Utility functions for working with coordinates
export class CoordinateUtils {
  /** Great-circle distance in kilometres between two lat/lon points (Haversine). */
  static calculateDistance(
    lat1: number,
    lon1: number,
    lat2: number,
    lon2: number
  ): number {
    const EARTH_RADIUS_KM = 6371
    const sinHalfLat = Math.sin(this.toRadians(lat2 - lat1) / 2)
    const sinHalfLon = Math.sin(this.toRadians(lon2 - lon1) / 2)
    const a =
      sinHalfLat * sinHalfLat +
      Math.cos(this.toRadians(lat1)) * Math.cos(this.toRadians(lat2)) *
      sinHalfLon * sinHalfLon
    const angularDistance = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a))
    return EARTH_RADIUS_KM * angularDistance
  }
  /** Convert degrees to radians. */
  private static toRadians(degrees: number): number {
    return (degrees * Math.PI) / 180
  }
  /** True when (lat, lon) lies inside the inclusive bounding box. */
  static isWithinBounds(
    lat: number,
    lon: number,
    bounds: [number, number, number, number] // [minLat, maxLat, minLon, maxLon]
  ): boolean {
    const [minLat, maxLat, minLon, maxLon] = bounds
    return lat >= minLat && lat <= maxLat &&
           lon >= minLon && lon <= maxLon
  }
  /** Format as e.g. "51.500000°N, 0.120000°W" (six decimal places). */
  static formatCoordinates(lat: number, lon: number): string {
    const latHemisphere = lat >= 0 ? 'N' : 'S'
    const lonHemisphere = lon >= 0 ? 'E' : 'W'
    return `${Math.abs(lat).toFixed(6)}°${latHemisphere}, ${Math.abs(lon).toFixed(6)}°${lonHemisphere}`
  }
}
// 7. Example of using coordinates in Neo4j queries
// Cypher snippets that consume geocoded latitude/longitude via Neo4j's
// native point() type; supply the $-parameters when executing each query.
export const neo4jQueries = {
  // Create institute node with location
  createInstituteWithLocation: `
    CREATE (i:Institute {
      id: $institute_id,
      name: $name,
      location: point({latitude: $latitude, longitude: $longitude})
    })
    RETURN i
  `,
  // Find institutes within radius ($radiusMeters, measured from the center point)
  findInstitutesWithinRadius: `
    MATCH (i:Institute)
    WHERE distance(i.location, point({latitude: $centerLat, longitude: $centerLon})) < $radiusMeters
    RETURN i, distance(i.location, point({latitude: $centerLat, longitude: $centerLon})) as distance
    ORDER BY distance
  `,
  // Find institutes in bounding box (inclusive min/max lat/lon)
  findInstitutesInBounds: `
    MATCH (i:Institute)
    WHERE i.location.latitude >= $minLat
    AND i.location.latitude <= $maxLat
    AND i.location.longitude >= $minLon
    AND i.location.longitude <= $maxLon
    RETURN i
  `
}
// Aggregate default export so consumers can import everything at once in
// addition to the named exports above.
export default {
  geocodeInstitute,
  batchGeocodeInstitutes,
  getInstitutesNeedingGeocoding,
  displayInstituteOnMap,
  InstituteGeocoder,
  CoordinateUtils,
  neo4jQueries
}

View File

@ -0,0 +1,325 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import { createClient } from 'https://esm.sh/@supabase/supabase-js@2'
// CORS headers attached to every response so the function can be invoked
// from browser clients on any origin.
const corsHeaders = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
// Request body: institute_id is required; the remaining fields optionally
// supply the address instead of the one stored on the institute row.
interface GeocodingRequest {
  institute_id: string
  address?: string
  street?: string
  town?: string
  county?: string
  postcode?: string
  country?: string
}
// Subset of the SearXNG JSON search response consumed here; latitude and
// longitude arrive as strings and are parsed with parseFloat.
interface SearXNGResponse {
  query: string
  number_of_results: number
  results: Array<{
    title: string
    longitude: string
    latitude: string
    boundingbox: string[]
    geojson?: any
    osm?: any
  }>
}
// Result envelope returned by the geocoding helpers below.
interface GeocodingResult {
  success: boolean
  message: string
  coordinates?: {
    latitude: number
    longitude: number
    boundingbox: string[]   // bounding-box strings from the OSM result — ordering not verified here
    geojson?: any
    osm?: any
  }
  error?: string
}
serve(async (req: Request) => {
// Handle CORS preflight requests
if (req.method === 'OPTIONS') {
return new Response('ok', { headers: corsHeaders })
}
try {
// Get environment variables
const supabaseUrl = Deno.env.get('SUPABASE_URL')
const supabaseServiceKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')
const searxngUrl = Deno.env.get('SEARXNG_URL') || 'https://search.kevlarai.com'
if (!supabaseUrl || !supabaseServiceKey) {
throw new Error('Missing required environment variables')
}
// Create Supabase client
const supabase = createClient(supabaseUrl, supabaseServiceKey)
// Parse request body
const body: GeocodingRequest = await req.json()
if (!body.institute_id) {
return new Response(
JSON.stringify({ error: 'institute_id is required' }),
{
status: 400,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
}
// Get institute data from database
const { data: institute, error: fetchError } = await supabase
.from('institutes')
.select('*')
.eq('id', body.institute_id)
.single()
if (fetchError || !institute) {
return new Response(
JSON.stringify({ error: 'Institute not found' }),
{
status: 404,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
}
// Build search query from address components
let searchQuery = ''
if (body.address) {
searchQuery = body.address
} else {
const addressParts = [
body.street,
body.town,
body.county,
body.postcode,
body.country
].filter(Boolean)
searchQuery = addressParts.join(', ')
}
// If no search query provided, try to build from institute data
if (!searchQuery && institute.address) {
const address = institute.address as any
const addressParts = [
address.street,
address.town,
address.county,
address.postcode,
address.country
].filter(Boolean)
searchQuery = addressParts.join(', ')
}
if (!searchQuery) {
return new Response(
JSON.stringify({ error: 'No address information available for geocoding' }),
{
status: 400,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
}
// Query SearXNG for geocoding
const geocodingResult = await geocodeAddressWithFallback(institute.address, searxngUrl)
if (!geocodingResult.success) {
return new Response(
JSON.stringify({
error: 'Geocoding failed',
details: geocodingResult.error
}),
{
status: 500,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
}
// Update institute with geospatial coordinates
const { error: updateError } = await supabase
.from('institutes')
.update({
geo_coordinates: {
latitude: geocodingResult.coordinates!.latitude,
longitude: geocodingResult.coordinates!.longitude,
boundingbox: geocodingResult.coordinates!.boundingbox,
geojson: geocodingResult.coordinates!.geojson,
osm: geocodingResult.coordinates!.osm,
search_query: searchQuery,
geocoded_at: new Date().toISOString()
}
})
.eq('id', body.institute_id)
if (updateError) {
throw new Error(`Failed to update institute: ${updateError.message}`)
}
// Log the geocoding operation
await supabase
.from('function_logs')
.insert({
file_id: null,
step: 'geocoding',
message: 'Successfully geocoded institute address',
data: {
institute_id: body.institute_id,
search_query: searchQuery,
coordinates: geocodingResult.coordinates
}
})
return new Response(
JSON.stringify({
success: true,
message: 'Institute geocoded successfully',
institute_id: body.institute_id,
coordinates: geocodingResult.coordinates
}),
{
status: 200,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
} catch (error) {
console.error('Error in institute geocoder:', error)
return new Response(
JSON.stringify({
error: 'Internal server error',
details: error.message
}),
{
status: 500,
headers: { ...corsHeaders, 'Content-Type': 'application/json' }
}
)
}
})
/**
 * Geocode one query string through SearXNG's "!osm" bang.
 *
 * Returns a GeocodingResult: success with parsed coordinates when the first
 * result has a latitude/longitude pair, otherwise a failure result. All
 * errors (network, HTTP, JSON) are caught and reported, never thrown.
 */
async function geocodeAddress(searchQuery: string, searxngUrl: string): Promise<GeocodingResult> {
  try {
    console.log(`Geocoding address: ${searchQuery}`)
    // Build the SearXNG query ("!osm" routes it to OpenStreetMap)
    const url = `${searxngUrl}/search?q=${encodeURIComponent(`!osm ${searchQuery}`)}&format=json`
    console.log(`SearXNG URL: ${url}`)
    const response = await fetch(url)
    if (!response.ok) {
      throw new Error(`SearXNG request failed: ${response.status} ${response.statusText}`)
    }
    const data: SearXNGResponse = await response.json()
    console.log(`SearXNG response: ${JSON.stringify(data, null, 2)}`)
    // Inspect the results array directly rather than number_of_results.
    const hits = data.results ?? []
    if (hits.length === 0) {
      return {
        success: false,
        message: 'No results returned from SearXNG',
        error: 'No results returned from SearXNG'
      }
    }
    // Take the first (best-ranked) result.
    const [best] = hits
    if (!best.latitude || !best.longitude) {
      return {
        success: false,
        message: 'Result missing coordinates',
        error: 'Result missing coordinates'
      }
    }
    return {
      success: true,
      message: 'Geocoding successful',
      coordinates: {
        latitude: parseFloat(best.latitude),
        longitude: parseFloat(best.longitude),
        boundingbox: best.boundingbox || [],
        geojson: best.geojson || null,
        osm: best.osm || null
      }
    }
  } catch (error) {
    console.error('Error in geocodeAddress:', error)
    return {
      success: false,
      message: 'Geocoding failed',
      error: error.message
    }
  }
}
/**
 * Geocode an institute address, trying progressively less specific queries
 * until one succeeds: full street address → town/county/postcode →
 * postcode only → town/postcode. Returns the first successful result, or
 * a failure GeocodingResult when every attempt misses.
 */
async function geocodeAddressWithFallback(address: any, searxngUrl: string): Promise<GeocodingResult> {
  // Build the ordered attempt list up front; each entry carries the exact
  // log messages so observable behaviour (including logging) is unchanged.
  const attempts: Array<{ query: string; tryMsg: string; okMsg: string }> = []
  if (address.street && address.town && address.county && address.postcode) {
    const fullQuery = `${address.street}, ${address.town}, ${address.county}, ${address.postcode}`
    attempts.push({
      query: fullQuery,
      tryMsg: `Trying full address: ${fullQuery}`,
      okMsg: 'Full address geocoding successful'
    })
  }
  if (address.town && address.county && address.postcode) {
    const mediumQuery = `${address.town}, ${address.county}, ${address.postcode}`
    attempts.push({
      query: mediumQuery,
      tryMsg: `Trying medium address: ${mediumQuery}`,
      okMsg: 'Medium address geocoding successful'
    })
  }
  if (address.postcode) {
    attempts.push({
      query: address.postcode,
      tryMsg: `Trying postcode only: ${address.postcode}`,
      okMsg: 'Postcode geocoding successful'
    })
  }
  if (address.town && address.postcode) {
    const simpleQuery = `${address.town}, ${address.postcode}`
    attempts.push({
      query: simpleQuery,
      tryMsg: `Trying simple address: ${simpleQuery}`,
      okMsg: 'Simple address geocoding successful'
    })
  }
  // Execute the attempts in order, returning on the first success.
  for (const attempt of attempts) {
    console.log(attempt.tryMsg)
    const result = await geocodeAddress(attempt.query, searxngUrl)
    if (result.success) {
      console.log(attempt.okMsg)
      return result
    }
  }
  // All strategies failed
  return {
    success: false,
    message: 'All geocoding strategies failed',
    error: 'No coordinates found with any address combination'
  }
}

View File

@ -0,0 +1,142 @@
// Test script for institute geocoder functions
// This can be run in the browser console or as a standalone test
// Shape of one geocoding test case; expected_coords, when present, is used
// to grade how close the live result lands (see testGeocoding).
interface TestCase {
  name: string
  address: string
  expected_coords?: {
    latitude: number
    longitude: number
  }
}
// Well-known London landmarks with approximate reference coordinates.
const testCases: TestCase[] = [
  {
    name: "10 Downing Street, London",
    address: "10 Downing Street, London",
    expected_coords: {
      latitude: 51.5034878,
      longitude: -0.1276965
    }
  },
  {
    name: "Buckingham Palace, London",
    address: "Buckingham Palace, London",
    expected_coords: {
      latitude: 51.501364,
      longitude: -0.124432
    }
  },
  {
    name: "Big Ben, London",
    address: "Big Ben, London",
    expected_coords: {
      latitude: 51.499479,
      longitude: -0.124809
    }
  }
]
// Exercise the SearXNG "!osm" endpoint directly for each test case and log
// the returned coordinates plus a rough accuracy grade against the expected
// values. Requires network access to search.kevlarai.com; output is meant
// for manual inspection, not assertions.
async function testGeocoding() {
  console.log("🧪 Starting Institute Geocoder Tests...")
  for (const testCase of testCases) {
    console.log(`\n📍 Testing: ${testCase.name}`)
    try {
      // Test the SearXNG service directly
      const searchQuery = `!osm ${testCase.address}`
      const searchUrl = `https://search.kevlarai.com/search?q=${encodeURIComponent(searchQuery)}&format=json`
      console.log(`🔍 Searching: ${searchUrl}`)
      const response = await fetch(searchUrl)
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}: ${response.statusText}`)
      }
      const data = await response.json()
      console.log(`📊 Results: ${data.number_of_results} found`)
      if (data.results && data.results.length > 0) {
        const result = data.results[0]
        const coords = {
          latitude: parseFloat(result.latitude),
          longitude: parseFloat(result.longitude)
        }
        console.log(`✅ Coordinates: ${coords.latitude}, ${coords.longitude}`)
        if (testCase.expected_coords) {
          // Grade accuracy by absolute difference in degrees
          // (0.01° of latitude ≈ 1.1 km).
          const latDiff = Math.abs(coords.latitude - testCase.expected_coords.latitude)
          const lonDiff = Math.abs(coords.longitude - testCase.expected_coords.longitude)
          if (latDiff < 0.01 && lonDiff < 0.01) {
            console.log(`🎯 Accuracy: High (within 0.01 degrees)`)
          } else if (latDiff < 0.1 && lonDiff < 0.1) {
            console.log(`🎯 Accuracy: Medium (within 0.1 degrees)`)
          } else {
            console.log(`⚠️ Accuracy: Low (difference > 0.1 degrees)`)
          }
        }
        if (result.boundingbox) {
          console.log(`🗺️ Bounding Box: ${result.boundingbox.join(', ')}`)
        }
        if (result.geojson) {
          console.log(`🗺️ GeoJSON: ${result.geojson.type} with ${result.geojson.coordinates?.[0]?.length || 0} points`)
        }
      } else {
        console.log(`❌ No results found`)
      }
    } catch (error) {
      console.error(`❌ Test failed: ${error.message}`)
    }
  }
  console.log("\n🏁 Testing completed!")
}
// Test address parsing function
// Builds the comma-joined search query for sample addresses and prints each
// one; output is inspected manually (no assertions).
function testAddressParsing() {
  console.log("\n🔧 Testing Address Parsing...")
  const testAddresses = [
    {
      street: "10 Downing Street",
      town: "London",
      county: "Greater London",
      postcode: "SW1A 2AA",
      country: "United Kingdom"
    },
    {
      street: "Buckingham Palace",
      town: "London",
      county: "Greater London",
      postcode: "SW1A 1AA",
      country: "United Kingdom"
    }
  ]
  testAddresses.forEach((addr) => {
    const searchQuery = [addr.street, addr.town, addr.county, addr.postcode, addr.country]
      .filter(Boolean)
      .join(', ')
    console.log(`📍 Address: ${searchQuery}`)
  })
}
// Run tests if this script is executed directly
// `window` exists only in browsers; there the test functions are attached
// globally so they can be invoked from the console. Under Node we only
// announce availability — call the exported functions explicitly.
if (typeof window !== 'undefined') {
  // Browser environment
  window.testGeocoding = testGeocoding
  window.testAddressParsing = testAddressParsing
  console.log("🧪 Institute Geocoder tests loaded. Run testGeocoding() or testAddressParsing() to test.")
} else {
  // Node.js environment
  console.log("🧪 Institute Geocoder tests loaded.")
}
export { testGeocoding, testAddressParsing }