# ============================================
# DeepTutor Environment Template
# ============================================
# Copy this file to `.env` and fill in your values.
# Core runtime settings are grouped as:
# ports / llm / embedding / search / docker-cloud / security
# --------------------------------------------
# Ports
# --------------------------------------------
BACKEND_PORT=8001
FRONTEND_PORT=3782
# --------------------------------------------
# LLM (Required)
# --------------------------------------------
# Supported bindings: openai, lm_studio, ollama, azure_openai, deepseek, ...
# For the full list, run: `deeptutor provider list` (see the README for details).
LLM_BINDING=openai
LLM_MODEL=gpt-4o-mini
LLM_API_KEY=sk-xxx
LLM_HOST=https://api.openai.com/v1
LLM_API_VERSION=
# --------------------------------------------
# Embedding (Required for knowledge base features)
# --------------------------------------------
EMBEDDING_BINDING=openai
EMBEDDING_MODEL=text-embedding-3-large
EMBEDDING_API_KEY=sk-xxx
EMBEDDING_HOST=https://api.openai.com/v1
EMBEDDING_DIMENSION=3072
EMBEDDING_API_VERSION=
# ⚠️ Docker + local LLM (LM Studio / Ollama / vLLM)
# ─────────────────────────────────────────────────────
# When running DeepTutor in Docker and your LLM runs on the HOST machine:
# - Do NOT use "localhost" or "127.0.0.1" — inside the container these
# refer to the container itself, not the host.
# - macOS / Windows Docker Desktop: use http://host.docker.internal:<port>/v1
# - Linux: use the host's LAN IP, e.g. http://192.168.1.100:<port>/v1
# (or run Docker with --network=host)
#
# Example (LM Studio on port 1234):
# LLM_BINDING=lm_studio
# LLM_HOST=http://host.docker.internal:1234/v1
# EMBEDDING_BINDING=lm_studio
# EMBEDDING_HOST=http://host.docker.internal:1234/v1
# --------------------------------------------
# Web Search (Optional)
# --------------------------------------------
SEARCH_PROVIDER=
SEARCH_API_KEY=
SEARCH_BASE_URL=
# --------------------------------------------
# Docker / Cloud deployment (Optional)
# --------------------------------------------
# Public backend URL used by the frontend when deployed remotely.
NEXT_PUBLIC_API_BASE_EXTERNAL=
# Alternative direct API base URL.
NEXT_PUBLIC_API_BASE=
# --------------------------------------------
# Security / Networking (Optional)
# --------------------------------------------
# Keep this false in production.
DISABLE_SSL_VERIFY=false