services:
  nginx:
    profiles:
      - core
    container_name: nginx-proxy-manager-${NGINX_MODE:-dev}
    image: 'jc21/nginx-proxy-manager:latest'
    ports:
      - '80:80'
      - '81:81'
      - '443:443'
    volumes:
      - ./cc-volumes/nginx-proxy-manager/${BUILD_OS}/${NGINX_MODE:-dev}/data:/data
      - ./cc-volumes/nginx-proxy-manager/${BUILD_OS}/${NGINX_MODE:-dev}/letsencrypt:/etc/letsencrypt
      - ./cc-volumes/nginx-proxy-manager/${BUILD_OS}/${NGINX_MODE:-dev}/snippets:/snippets:ro
    environment:
      TZ: Europe/London
    networks:
      - cc-network

  keycloak:
    profiles:
      - core
      - database
    container_name: keycloak-${NGINX_MODE:-dev}
    build:
      context: ./cc-volumes/keycloak/${NGINX_MODE:-dev}/docker
      dockerfile: Dockerfile.${BUILD_OS}.${NGINX_MODE:-dev}
      args:
        KC_BOOTSTRAP_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
        KC_BOOTSTRAP_ADMIN_USERNAME: ${KEYCLOAK_ADMIN}
        POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
        KC_DB: postgres
        KC_DB_URL: jdbc:postgresql://db:5432/postgres
        KC_DB_USERNAME: keycloak
        KC_DB_PASSWORD: keycloak
        KC_DB_SCHEMA: keycloak
        KC_HOSTNAME: ${KEYCLOAK_URL}
        KC_HOSTNAME_STRICT: "false"
        KC_PROXY_HEADERS: xforwarded
        KC_PROXY_PROTOCOL_ENABLED: "false"
        KC_HTTP_ENABLED: "true"
        KC_HTTPS_ENABLED: "false"
        KC_HOSTNAME_ADMIN: ${KEYCLOAK_ADMIN_URL}
        KC_HOSTNAME_DEBUG: "true"
        KC_HEALTH_ENABLED: "true"
        KC_HOSTNAME_BACKCHANNEL_DYNAMIC: "false"
        KC_METRICS_ENABLED: "true"
        KC_LOG_LEVEL: DEBUG
        KC_HTTP_RELATIVE_PATH: /
    depends_on:
      db:
        condition: service_healthy
    restart: unless-stopped
    ports:
      - "${KEYCLOAK_MANAGEMENT_PORT}:9000"
      - "${KEYCLOAK_PORT}:8080"
      - "${KEYCLOAK_SSL_PORT}:8443"
    volumes:
      - ./cc-volumes/keycloak/${NGINX_MODE:-dev}/conf:/opt/keycloak/conf:ro
      - ./cc-volumes/keycloak/${NGINX_MODE:-dev}/providers:/opt/keycloak/providers:ro
      - ./cc-volumes/keycloak/${NGINX_MODE:-dev}/themes:/opt/keycloak/themes:ro
      - ./cc-volumes/keycloak/${NGINX_MODE:-dev}/master-realm-${NGINX_MODE:-dev}-${BUILD_OS}.json:/opt/keycloak/data/import/master-realm.json:ro
      - ./cc-volumes/keycloak/${NGINX_MODE:-dev}/classroomcopilot-realm-${NGINX_MODE:-dev}-${BUILD_OS}.json:/opt/keycloak/data/import/classroomcopilot-realm.json:ro
    networks:
      - cc-network

  oauth2-proxy-admin:
    image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
    container_name: oauth2-proxy-admin
    restart: unless-stopped
    environment:
      OAUTH2_PROXY_PROVIDER: oidc
      OAUTH2_PROXY_OIDC_ISSUER_URL: https://keycloak.classroomcopilot.test/realms/classroomcopilot
      OAUTH2_PROXY_CLIENT_ID: admin-app
      OAUTH2_PROXY_CLIENT_SECRET: ${KEYCLOAK_SECRET_ADMIN}
      OAUTH2_PROXY_COOKIE_SECRET: ${COOKIE_SECRET_ADMIN}
      OAUTH2_PROXY_COOKIE_DOMAIN: .classroomcopilot.test
      OAUTH2_PROXY_UPSTREAMS: http://cc-admin:3000
      OAUTH2_PROXY_REDIRECT_URL: https://admin.classroomcopilot.test/oauth2/callback
      OAUTH2_PROXY_EMAIL_DOMAINS: "*"
      OAUTH2_PROXY_ALLOWED_GROUPS: "admin"
      OAUTH2_PROXY_SKIP_PROVIDER_BUTTON: "true"
      OAUTH2_PROXY_PASS_ACCESS_TOKEN: "true"
      OAUTH2_PROXY_SET_XAUTHREQUEST: "true"
    ports:
      - "4181:4180"
    networks:
      - cc-network

  whisperlive-frontend:
    profiles:
      - core
      - frontend
    container_name: whisperlive-frontend-${NGINX_MODE:-dev}
    build:
      context: .
      dockerfile: ./whisperlive-frontend/Dockerfile
      args:
        BUILD_OS: ${BUILD_OS}
        NGINX_MODE: ${NGINX_MODE}
    environment:
      - VITE_APP_URL=${APP_URL}
      - VITE_APP_PROTOCOL=${APP_PROTOCOL}
      - VITE_APP_NAME=${APP_NAME}
      - VITE_DEV=${DEV_MODE}
      - VITE_WHISPERLIVE_URL=${WHISPERLIVE_URL}
    ports:
      - "${PORT_WHISPERLIVE_FRONTEND}:${PORT_WHISPERLIVE_FRONTEND}"
      - "${PORT_WHISPERLIVE_FRONTEND_SSL}:${PORT_WHISPERLIVE_FRONTEND_SSL}"
    volumes:
      - ./whisperlive-frontend:/app
      - /app/node_modules
      - ./cc-volumes/whisperlive/frontend/ssl/fullchain1.pem:/etc/nginx/ssl/fullchain.pem:ro
      - ./cc-volumes/whisperlive/frontend/ssl/privkey1.pem:/etc/nginx/ssl/privkey.pem:ro
    networks:
      - cc-network

  whisperlive-win:
    profiles:
      - none
    container_name: whisperlive-${NGINX_MODE:-dev}
    build:
      context: ./WhisperLive/server
      dockerfile: Dockerfile.${NGINX_MODE:-dev}
      args:
        PORT_WHISPERLIVE: ${PORT_WHISPERLIVE}
        PORT_WHISPERLIVE_SSL: ${PORT_WHISPERLIVE_SSL}
        WHISPERLIVE_SSL: ${WHISPERLIVE_SSL:-false}
        WHISPERLIVE_MODEL: ${WHISPERLIVE_MODEL:-base}
    env_file:
      - .env
    environment:
      WHISPERLIVE_SSL: ${WHISPERLIVE_SSL:-false}
      LOG_PATH: /app/logs
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
    volumes:
      - ./cc-volumes/whisperlive/models:/app/models
      - ./cc-volumes/whisperlive/${NGINX_MODE:-dev}/ssl:/app/ssl
      - ./local/logs/whisperlive:/app/logs
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    ports:
      - ${PORT_WHISPERLIVE}:${PORT_WHISPERLIVE}
      - ${PORT_WHISPERLIVE_SSL}:${PORT_WHISPERLIVE_SSL}
    networks:
      - cc-network

  whisperlive-macos:
    profiles:
      - core
    container_name: whisperlive-${NGINX_MODE:-dev}
    build:
      context: ./WhisperLive/server
      dockerfile: Dockerfile.${BUILD_OS}.${NGINX_MODE:-dev}
      args:
        PORT_WHISPERLIVE: ${PORT_WHISPERLIVE}
        PORT_WHISPERLIVE_SSL: ${PORT_WHISPERLIVE_SSL}
        WHISPERLIVE_SSL: ${WHISPERLIVE_SSL:-false}
        WHISPL_USE_CUSTOM_MODEL: ${WHISPL_USE_CUSTOM_MODEL:-false}
        FASTERWHISPER_MODEL: ${FASTERWHISPER_MODEL:-base}
    env_file:
      - .env
    environment:
      WHISPERLIVE_SSL: ${WHISPERLIVE_SSL:-false}
      LOG_PATH: /app/logs
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
    volumes:
      - ./local/data/whisperlive/models:/app/models
      - ./local/data/whisperlive/auto-download:/root/.cache/huggingface/hub
      - ./cc-volumes/whisperlive/${NGINX_MODE:-dev}/ssl:/app/ssl
      - ./local/logs/whisperlive:/app/logs
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G
    ports:
      - ${PORT_WHISPERLIVE}:${PORT_WHISPERLIVE}
      - ${PORT_WHISPERLIVE_SSL}:${PORT_WHISPERLIVE_SSL}
    networks:
      - cc-network

  whisperlive-cpu:
    profiles:
      - none
    container_name: whisperlive-cpu-${NGINX_MODE:-dev}
    image: ghcr.io/collabora/whisperlive-cpu:latest
    environment:
      LOG_PATH: /app/logs
    volumes:
      - ./cc-volumes/whisperlive/models:/app/models
      - ./cc-volumes/whisperlive/${NGINX_MODE:-dev}/ssl:/app/ssl
      - ./local/logs/whisperlive-cpu:/app/logs
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G
    ports:
      - ${PORT_WHISPERLIVE}:9090
    networks:
      - cc-network

  whisperlive-gpu:
    profiles:
      - none
    container_name: whisperlive-gpu-${NGINX_MODE:-dev}
    image: ghcr.io/collabora/whisperlive-gpu:latest
    environment:
      LOG_PATH: /app/logs
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
    volumes:
      - ./cc-volumes/whisperlive/models:/app/models
      - ./cc-volumes/whisperlive/${NGINX_MODE:-dev}/ssl:/app/ssl
      - ./local/logs/whisperlive-gpu:/app/logs
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 16G
    ports:
      - ${PORT_WHISPERLIVE}:9090
    networks:
      - cc-network

  solid-proxy-internal:
    profiles:
      - core
    container_name: solid-proxy-internal-${NGINX_MODE:-dev}
    image: nginx:alpine
    ports:
      - 3007:3007
    volumes:
      - ./cc-volumes/solid-css/${NGINX_MODE:-dev}/nginx/solid-internal.conf:/etc/nginx/conf.d/default.conf:ro
      - ./cc-volumes/cloudflare-origin-certs/solid_cc_cert.pem:/etc/nginx/ssl/cert.pem:ro
      - ./cc-volumes/cloudflare-origin-certs/solid_cc_key.pem:/etc/nginx/ssl/key.pem:ro
      - ./local/logs/${NGINX_MODE:-dev}/solid-proxy-internal:/var/log/nginx
    networks:
      - cc-network

  cc-marketing-site:
    profiles:
      - core
      - frontend
    container_name: cc-marketing-${NGINX_MODE:-dev}
    build:
      context: ./cc-marketing
      dockerfile: Dockerfile.${NGINX_MODE:-dev}
    env_file:
      - .env
    environment:
      - VITE_APP_URL=${APP_URL}
      - VITE_APP_SITE_URL=${SITE_URL}
      - VITE_APP_APP_URL=${APP_URL} # TODO: someone check
    ports:
      - "${PORT_MARKETING_SITE}:${PORT_MARKETING_SITE}"
      - "${PORT_MARKETING_SITE_SSL}:${PORT_MARKETING_SITE_SSL}"
    networks:
      - cc-network

  frontend:
    profiles:
      - core
      - frontend
    container_name: frontend-${NGINX_MODE:-dev}
    build:
      context: ./frontend
      dockerfile: Dockerfile.${NGINX_MODE:-dev}
      args:
        VITE_APP_URL: ${VITE_APP_URL}
    environment:
      - VITE_FRONTEND_SITE_URL=${SITE_URL}
      - VITE_APP_PROTOCOL=${APP_PROTOCOL}
      - VITE_APP_NAME=${APP_NAME}
      - VITE_SUPER_ADMIN_EMAIL=${APP_AUTHOR_EMAIL}
      - VITE_DEV=${DEV_MODE}
      - VITE_SUPABASE_URL=${SUPABASE_URL}
      - VITE_SUPABASE_ANON_KEY=${ANON_KEY}
      - VITE_STRICT_MODE=${STRICT_MODE}
      - APP_URL=${APP_URL}
      - PORT_FRONTEND=${PORT_FRONTEND}
    ports:
      - "${PORT_FRONTEND}:${PORT_FRONTEND}"
    volumes:
      - ./frontend:/app
      - /app/node_modules
    networks:
      - cc-network

  storybook:
    profiles:
      - core
      - frontend
    container_name: storybook-${NGINX_MODE:-dev}
    build:
      context: ./frontend
      dockerfile: Dockerfile.storybook.macos.${NGINX_MODE:-dev}
    environment:
      - NODE_ENV=${NGINX_MODE:-dev}
    ports:
      - "${PORT_STORYBOOK:-6006}:6006"
    volumes:
      - ./frontend:/app
      - /app/node_modules
    networks:
      - cc-network
    depends_on:
      - frontend

  cc-admin:
    profiles:
      - core
      - frontend
    container_name: cc-admin-${NGINX_MODE:-dev}
    build:
      context: ./cc-admin
      dockerfile: Dockerfile.${NGINX_MODE:-dev}
      args:
        PORT: ${PORT_CC_ADMIN}
        PORT_DEVTOOLS: ${PORT_CC_ADMIN_DEVTOOLS}
        SUPABASE_URL: ${SUPABASE_URL}
        ANON_KEY: ${ANON_KEY}
        SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
        VITE_CC_ADMIN_URL: ${CC_ADMIN_URL}
    environment:
      APP_URL: ${APP_URL}
      PORT_CC_ADMIN: ${PORT_CC_ADMIN}
      PORT_CC_ADMIN_DEVTOOLS: ${PORT_CC_ADMIN_DEVTOOLS}
    env_file:
      - .env
      - ./cc-admin/.env.${NGINX_MODE:-dev}
    ports:
      - "${PORT_CC_ADMIN}:${PORT_CC_ADMIN}"
    volumes:
      - ./cc-admin:/app
      - /app/node_modules
    networks:
      - cc-network

  backend:
    profiles:
      - core
      - backend
    container_name: backend-${NGINX_MODE:-dev}
    build:
      context: ./backend
      dockerfile: Dockerfile.${BUILD_OS}.${NGINX_MODE:-dev}
    env_file:
      - .env
    environment:
      ADMIN_EMAIL: ${SUPER_ADMIN_EMAIL}
      ADMIN_PASSWORD: ${SUPER_ADMIN_PASSWORD}
      ADMIN_NAME: ${SUPER_ADMIN_NAME}
      ADMIN_USERNAME: ${SUPER_ADMIN_USERNAME}
      ADMIN_DISPLAY_NAME: ${SUPER_ADMIN_DISPLAY_NAME}
      SUPABASE_URL: ${SUPABASE_URL}
      SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB}
      UVICORN_TIMEOUT: 300
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./backend/:/app/backend
      - ./cc-volumes/init:/init:rw
      - ./local/logs/container/backend:/logs
      - ./local/input:/app/local/input:rw
      - ./local/output:/app/local/output:rw
    ports:
      - "${PORT_BACKEND}:${PORT_BACKEND}"
    extra_hosts:
      - "supa.classroomcopilot.test:172.23.0.1"
    networks:
      - cc-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G

  tldraw-sync:
    profiles:
      - core
      - backend
    container_name: tldraw-sync-${NGINX_MODE:-dev}
    build:
      context: ./tldraw-sync
      dockerfile: Dockerfile
    env_file:
      - .env
    environment:
      - LOG_PATH=/app/logs
    ports:
      - "5002:5002"
    volumes:
      - ./tldraw-sync:/app
      - ./cc-volumes/tldraw-sync/bunfig.toml:/app/bunfig.toml:ro
      - ./local/data/tldraw-sync/.assets:/app/.assets
      - ./local/data/tldraw-sync/.rooms:/app/.rooms
      - ./local/logs/container/tldraw-sync:/app/logs
    networks:
      - cc-network

  neo4j:
    profiles:
      - database
      - backend
    image: neo4j:enterprise
    container_name: neo4j-${NGINX_MODE:-dev}
    env_file:
      - .env
    environment:
      - NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
      - NEO4J_PLUGINS='["apoc"]'
    ports:
      - ${PORT_NEO4J_HTTP}:${PORT_NEO4J_HTTP}
      - ${PORT_NEO4J_HTTPS}:${PORT_NEO4J_HTTPS}
      - ${PORT_NEO4J_BOLT}:${PORT_NEO4J_BOLT}
    volumes:
      - neo4j-data:/data
      - neo4j-logs:/logs
      - ./cc-volumes/neo4j/conf/${NGINX_MODE:-dev}/neo4j.conf:/conf/neo4j.conf:ro
      - ./cc-volumes/cloudflare-origin-certs/graph_cc_key.pem:/certificates/https/private.key:ro
      - ./cc-volumes/cloudflare-origin-certs/graph_cc_cert.pem:/certificates/https/public.crt:ro
      - ./cc-volumes/letsencrypt-certs/bolt.classroomcopilot/privkey1.pem:/certificates/bolt/private.key:ro
      - ./cc-volumes/letsencrypt-certs/bolt.classroomcopilot/fullchain1.pem:/certificates/bolt/public.crt:ro
      - ./cc-volumes/letsencrypt-certs/bolt.classroomcopilot/fullchain1.pem:/certificates/bolt/trusted/public.crt:ro
      - ./cc-volumes/neo4j/plugins:/plugins:rw
      - ./local/logs/container/neo4j:/logs
    healthcheck:
      test: ["CMD-SHELL", "neo4j status || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
    networks:
      - cc-network

  solid-css:
    profiles:
      - solid
    image: solidproject/community-server:latest
    container_name: solid-css-${NGINX_MODE:-dev}
    restart: unless-stopped
    ports:
      - "${PORT_SOLID_CSS}:3000"
    volumes:
      - ./cc-volumes/solid-css/${NGINX_MODE:-dev}/config:/config:ro
      - ./cc-volumes/solid-css/${NGINX_MODE:-dev}/data:/data
    command:
      - --config
      - /config/docker.json
    networks:
      - cc-network

  redis:
    profiles:
      - database
      - backend
    image: redis:alpine
    container_name: redis-${NGINX_MODE:-dev}
    networks:
      - cc-network
    ports:
      - "${PORT_REDIS:-6379}:6379"
    command: redis-server --appendonly yes
    volumes:
      - redis-data:/data

  searxng:
    profiles:
      - core
      - services
      - backend
    image: searxng/searxng
    container_name: searxng-${NGINX_MODE:-dev}
    ports:
      - "${PORT_SEARXNG}:${PORT_SEARXNG}"
    env_file:
      - .env
    volumes:
      - ./cc-volumes/searxng/limiter.toml:/etc/searxng/limiter.toml
      - ./cc-volumes/searxng/settings.yml:/etc/searxng/settings.yml
    networks:
      - cc-network

  mailhog:
    profiles:
      - core
    container_name: mailhog-${NGINX_MODE:-dev}
    image: mailhog/mailhog
    ports:
      - "${PORT_MAILHOG_SMTP}:1025" # SMTP port
      - "${PORT_MAILHOG_WEB}:8025" # Web UI port
    env_file:
      - .env
    volumes:
      - ./local/logs/mailhog:/var/mailhog
      - ./local/data/mailhog:/var/mailhog/mailhog
    networks:
      - cc-network

  postfix:
    profiles:
      - prod
    image: catatnight/postfix
    environment:
      - maildomain=${APP_URL}
      - smtp_user=user:password
    ports:
      - "25:25"

  minecraft-server:
    profiles:
      - none
    image: itzg/minecraft-server
    container_name: cc-minecraft-forge-${NGINX_MODE:-dev}
    environment:
      EULA: "TRUE"
      TYPE: VANILLA
      ONLINE_MODE: "false"
      PROXY: "minecraft.kevlarai.com" # ✅ Set custom server host details
      MOTD: "Welcome to KevlarAI's Minecraft Forge Server"
      # ✅ Optional extras (customize as desired)
      MAX_PLAYERS: 20
      ALLOW_NETHER: "TRUE"
      ENABLE_COMMAND_BLOCK: "TRUE"
      DIFFICULTY: "normal"
      MODE: "survival"
      LEVEL_TYPE: "minecraft:default"
      LEVEL: "world"
      PVP: "TRUE"
    ports:
      - 25575:25575
      - 25565:25565
    volumes:
      - ./cc-volumes/minecraft/${NGINX_MODE:-dev}/vanilla/data:/data
    restart: unless-stopped
    networks:
      - cc-network

  # Supabase containers
  studio:
    profiles:
      - database
      - supabase
    container_name: supabase-studio-${NGINX_MODE:-dev}
    image: supabase/studio:20250113-83c9420
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "fetch('http://studio:3000/api/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})",
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    ports:
      - ${PORT_SUPABASE_STUDIO}:3000
    env_file:
      - .env
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DEFAULT_PROJECT_ID: "ClassroomCopilot"
      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      SUPABASE_URL: ${SUPABASE_URL}
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
    networks:
      - cc-network

  kong:
    profiles:
      - database
      - supabase
    container_name: supabase-kong-${NGINX_MODE:-dev}
    image: kong:2.8.1
    restart: unless-stopped
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    depends_on:
      analytics:
        condition: service_healthy
    env_file:
      - .env
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
      KONG_PROXY_ACCESS_LOG: "/dev/stdout"
      KONG_ADMIN_ACCESS_LOG: "/dev/stdout"
      KONG_PROXY_ERROR_LOG: "/dev/stderr"
      KONG_ADMIN_ERROR_LOG: "/dev/stderr"
      KONG_CORS_ORIGINS: "*"
      KONG_CORS_METHODS: "GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS"
      KONG_CORS_HEADERS: "DNT,X-Auth-Token,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,apikey,x-client-info"
      KONG_CORS_EXPOSED_HEADERS: "Content-Length,Content-Range"
      KONG_CORS_MAX_AGE: 3600
    volumes:
      - ./supabase/api/kong.yml:/home/kong/temp.yml:ro
    networks:
      - cc-network

  auth:
    profiles:
      - database
      - supabase
    container_name: supabase-auth-${NGINX_MODE:-dev}
    image: supabase/gotrue:v2.167.0
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    env_file:
      - .env
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}
      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}
      GOTRUE_LOG_LEVEL: ${AUTH_LOG_LEVEL}
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: ${MAILER_SECURE_EMAIL_CHANGE_ENABLED}
      GOTRUE_MAILER_EXTERNAL_HOSTS: "localhost,admin.localhost,kong,supabase.classroomcopilot.ai,classroomcopilot.ai"
      GOTRUE_MAILER_EXTERNAL_HOSTS_ALLOW_REGEX: ".*\\.classroomcopilot\\.ai$"
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_EXTERNAL_AZURE_ENABLED: ${AZURE_ENABLED}
      GOTRUE_EXTERNAL_AZURE_CLIENT_ID: ${AZURE_CLIENT_ID}
      GOTRUE_EXTERNAL_AZURE_SECRET: ${AZURE_SECRET}
      GOTRUE_EXTERNAL_AZURE_REDIRECT_URI: ${AZURE_REDIRECT_URI}
    networks:
      - cc-network

  rest:
    profiles:
      - database
      - supabase
    container_name: supabase-rest-${NGINX_MODE:-dev}
    image: postgrest/postgrest:v12.2.0
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    env_file:
      - .env
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: "postgrest"
    networks:
      - cc-network

  realtime:
    profiles:
      - database
      - supabase
    container_name: realtime-dev-${NGINX_MODE:-dev}.supabase-realtime
    image: supabase/realtime:v2.34.7
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://localhost:4000/api/tenants/realtime-dev/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    env_file:
      - .env
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: "SET search_path TO _realtime"
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: "10000"
      APP_NAME: realtime
      SEED_SELF_HOST: true
      RUN_JANITOR: true
    networks:
      - cc-network

  storage:
    profiles:
      - database
      - supabase
    container_name: supabase-storage-${NGINX_MODE:-dev}
    image: supabase/storage-api:v1.14.5
    depends_on:
      db:
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://storage:5000/status",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    env_file:
      - .env
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    networks:
      - cc-network

  imgproxy:
    profiles:
      - database
      - supabase
    container_name: supabase-imgproxy-${NGINX_MODE:-dev}
    image: darthsim/imgproxy:v3.8.0
    healthcheck:
      test: ["CMD", "imgproxy", "health"]
      timeout: 10s
      interval: 5s
      retries: 10
    env_file:
      - .env
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    volumes:
      - ./local/data/supabase/storage-${NGINX_MODE:-dev}:/var/lib/storage:z
    networks:
      - cc-network

  meta:
    profiles:
      - database
      - supabase
    container_name: supabase-meta-${NGINX_MODE:-dev}
    image: supabase/postgres-meta:v0.84.2
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    env_file:
      - .env
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
    networks:
      - cc-network

  functions:
    profiles:
      - database
      - supabase
    container_name: supabase-edge-functions-${NGINX_MODE:-dev}
    image: supabase/edge-runtime:v1.67.0
    restart: unless-stopped
    depends_on:
      analytics:
        condition: service_healthy
    env_file:
      - .env
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: ${SUPABASE_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    volumes:
      - ./supabase/functions:/home/deno/functions:Z
    command:
      - start
      - --main-service
      - /home/deno/functions/main
    networks:
      - cc-network

  analytics:
    profiles:
      - database
      - supabase
    container_name: supabase-analytics-${NGINX_MODE:-dev}
    image: supabase/logflare:1.4.0
    healthcheck:
      test: ["CMD", "curl", "http://localhost:4000/health"]
      timeout: 10s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    env_file:
      - .env
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: _supabase
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
    ports:
      - 4000:4000
    networks:
      - cc-network

  db:
    profiles:
      - database
      - supabase
    container_name: supabase-db-${NGINX_MODE:-dev}
    image: supabase/postgres:15.8.1.020
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -h localhost || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 20
      start_period: 30s
    depends_on:
      vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal
    restart: unless-stopped
    env_file:
      - .env
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    volumes:
      - ./supabase/db/migrations/supabase/50-_supabase.sql:/docker-entrypoint-initdb.d/migrations/50-_supabase.sql
      - ./supabase/db/migrations/supabase/52-realtime.sql:/docker-entrypoint-initdb.d/migrations/52-realtime.sql
      - ./supabase/db/migrations/supabase/52-pooler.sql:/docker-entrypoint-initdb.d/migrations/52-pooler.sql
      - ./supabase/db/migrations/supabase/52-logs.sql:/docker-entrypoint-initdb.d/migrations/52-logs.sql
      - ./supabase/db/init-scripts/51-webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/51-webhooks.sql
      - ./supabase/db/init-scripts/52-roles.sql:/docker-entrypoint-initdb.d/init-scripts/52-roles.sql
      - ./supabase/db/init-scripts/52-jwt.sql:/docker-entrypoint-initdb.d/init-scripts/52-jwt.sql
      - ./supabase/db/migrations/core/60-create-databases.sql:/docker-entrypoint-initdb.d/migrations/60-create-databases.sql
      - ./supabase/db/migrations/core/61-core-schema.sql:/docker-entrypoint-initdb.d/migrations/61-core-schema.sql
      - ./supabase/db/migrations/core/62-functions-triggers.sql:/docker-entrypoint-initdb.d/migrations/62-functions-triggers.sql
      - ./supabase/db/migrations/core/63-storage-policies.sql:/docker-entrypoint-initdb.d/migrations/63-storage-policies.sql
      - ./supabase/db/migrations/core/64-initial-admin.sql:/docker-entrypoint-initdb.d/migrations/64-initial-admin.sql
      - ./supabase/db/migrations/core/65-keycloak-setup.sql:/docker-entrypoint-initdb.d/migrations/65-keycloak-setup.sql
      - supabase-db-data:/var/lib/postgresql/data
      - supabase-db-config:/etc/postgresql-custom
    networks:
      - cc-network

  vector:
    profiles:
      - database
      - supabase
    container_name: supabase-vector-${NGINX_MODE:-dev}
    image: timberio/vector:0.28.1-alpine
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health",
        ]
      timeout: 10s
      interval: 10s
      retries: 10
    volumes:
      - ./supabase/logs/vector.yml:/etc/vector/vector.yml:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    env_file:
      - .env
    environment:
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
    command: ["--config", "/etc/vector/vector.yml"]
    networks:
      - cc-network

  supavisor:
    profiles:
      - database
      - supabase
    container_name: supabase-pooler-${NGINX_MODE:-dev}
    image: supabase/supavisor:1.1.56
    healthcheck:
      test: curl -sSfL --head -o /dev/null "http://127.0.0.1:4000/api/health"
      interval: 10s
      timeout: 10s
      retries: 10
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    command:
      - /bin/sh
      - -c
      - /app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server
    restart: unless-stopped
    ports:
      - ${POSTGRES_PORT}:5432
      - ${POOLER_PROXY_PORT_TRANSACTION}:6543
    env_file:
      - .env
    environment:
      - PORT=4000
      - POSTGRES_PORT=${POSTGRES_PORT}
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - DATABASE_URL=ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
      - CLUSTER_POSTGRES=true
      - SECRET_KEY_BASE=${SECRET_KEY_BASE}
      - VAULT_ENC_KEY=${VAULT_ENC_KEY}
      - API_JWT_SECRET=${JWT_SECRET}
      - METRICS_JWT_SECRET=${JWT_SECRET}
      - REGION=local
      - ERL_AFLAGS=-proto_dist inet_tcp
      - POOLER_TENANT_ID=${POOLER_TENANT_ID}
      - POOLER_DEFAULT_POOL_SIZE=${POOLER_DEFAULT_POOL_SIZE}
      - POOLER_MAX_CLIENT_CONN=${POOLER_MAX_CLIENT_CONN}
      - POOLER_POOL_MODE=transaction
    volumes:
      - ./supabase/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
    networks:
      - cc-network

  ollama:
    profiles:
      - none
      - ai_services
    container_name: ollama-${NGINX_MODE:-dev}
    build:
      context: ./cc-volumes/ollama/docker
      dockerfile: Dockerfile.${BUILD_OS}.${NGINX_MODE:-dev}
    ports:
      - "${PORT_OLLAMA}:11434"
    volumes:
      - ./local/data/ollama:/root/.ollama
      - ./local/logs/ollama:/var/log/ollama
    environment:
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_ORIGINS=*
    networks:
      - cc-network
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G

  open-webui:
    profiles:
      - core
      - ai_services
    container_name: open-webui-${NGINX_MODE:-dev}
    image: ghcr.io/open-webui/open-webui:main
    ports:
      - "${PORT_OPEN_WEBUI:-3333}:8080"
    volumes:
      - ./local/${BUILD_OS}/${NGINX_MODE:-dev}/data/open-webui:/app/backend/data
      - ./local/${BUILD_OS}/${NGINX_MODE:-dev}/logs/open-webui:/app/backend/logs
    environment:
      - OLLAMA_LOG_LEVEL=DEBUG
      - WEBUI_URL=http://open-webui.classroomcopilot.test
      - DEFAULT_LOCALE=en
      - DEFAULT_USER_ROLE=pending
      - ENABLE_OAUTH_SIGNUP=true
      - OAUTH_CLIENT_ID=open-webui
      - OAUTH_CLIENT_SECRET=${KEYCLOAK_SECRET_OPENWEBUI}
      - OAUTH_PROVIDER_NAME=Keycloak
      - OAUTH_SCOPES=openid,email,profile
      # Optional
      - OAUTH_MERGE_ACCOUNTS_BY_EMAIL=true
      - OAUTH_ROLES_CLAIM=realm_access.roles
      - ENABLE_OAUTH_ROLE_MANAGEMENT=true
      - OAUTH_ALLOWED_ROLES=user,admin,superadmin
      - OAUTH_ADMIN_ROLES=superadmin,admin
      - OAUTH_ALLOWED_DOMAINS=kevlarai.test
      # Keycloak
      - OPENID_PROVIDER_URL=http://keycloak.kevlarai.test/realms/ClassroomCopilot/.well-known/openid-configuration
      - OLLAMA_BASE_URL=http://${HOST_OLLAMA}:11434
      - PORT=8080
      - WEBUI_PORT=8080
      - HOST=0.0.0.0
    env_file:
      - .env
    extra_hosts:
      - "keycloak.kevlarai.test=${HOST_IP}"
    networks:
      - cc-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G

  n8n:
    profiles:
      - none
      - ai_services
    container_name: n8n-${NGINX_MODE:-dev}
    build:
      context: ./cc-volumes/n8n/docker
      dockerfile: Dockerfile.${BUILD_OS}.${NGINX_MODE:-dev}
    ports:
      - "5678:5678"
    volumes:
      - ./local/data/n8n:/home/node/.n8n
      - ./local/logs/n8n:/home/node/.n8n/logs
    environment:
      - N8N_HOST=0.0.0.0
      - N8N_PORT=5678
      - N8N_PROTOCOL=http
      - N8N_USER_MANAGEMENT_DISABLED=true
      - N8N_BASIC_AUTH_ACTIVE=false
      - N8N_SECURE_COOKIE=false
      - NODE_ENV=production
    networks:
      - cc-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G

volumes:
  supabase-db-config:
    driver: local
  supabase-db-data:
    driver: local
  neo4j-data:
    driver: local
  neo4j-logs:
    driver: local
  frontend-node-modules:
    driver: local
  frontend-dist:
    driver: local
  tldraw-sync-node-modules:
    driver: local
  redis-data:
    driver: local
  jupyter-user-data:
    driver: local

networks:
  cc-network:
    name: cc-network
    driver: bridge