-
Notifications
You must be signed in to change notification settings - Fork 26
Expand file tree
/
Copy path.env.example
More file actions
173 lines (157 loc) · 7.84 KB
/
.env.example
File metadata and controls
173 lines (157 loc) · 7.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
# Root environment variables — Rust core, Tauri shell, and shared settings.
# Copy to .env and fill in values as needed.
# Loaded via: source scripts/load-dotenv.sh
#
# Tags: [required] must be set, [optional] has a sensible default or can be blank
# ---------------------------------------------------------------------------
# App environment
# ---------------------------------------------------------------------------
# [optional] set to either 'production' or 'staging'
OPENHUMAN_APP_ENV=staging
# ---------------------------------------------------------------------------
# Backend API
# ---------------------------------------------------------------------------
# [optional] App environment selector for default paths/URLs: production | staging
# NOTE: OPENHUMAN_APP_ENV is already set once in the "App environment" section above;
# this duplicate assignment is commented out to avoid a conflicting double definition
# (duplicate keys are loader-dependent: shell `source` takes the last, some dotenv
# loaders take the first).
# OPENHUMAN_APP_ENV=staging
# [required] Primary backend URL (read by Rust core and QuickJS skills sandbox)
BACKEND_URL=https://staging-api.tinyhumans.ai
# [required] Also read by Vite frontend (VITE_ prefix required for browser exposure)
VITE_OPENHUMAN_APP_ENV=staging
VITE_BACKEND_URL=https://staging-api.tinyhumans.ai
# [optional] Consumer first-session UX in the desktop/web app (default off). See docs/plans/consumer-first-session-spec.md
# VITE_CONSUMER_FIRST_SESSION=true
# ---------------------------------------------------------------------------
# Authentication (for skills OAuth proxy and debug scripts)
# ---------------------------------------------------------------------------
# [optional] Session JWT — used by QuickJS skills sandbox for oauth.fetch proxy calls.
# Also used by debug scripts (scripts/debug-skill.sh, scripts/debug-notion-live.sh).
# Get from login flow or browser devtools.
JWT_TOKEN=
# ---------------------------------------------------------------------------
# Core process
# ---------------------------------------------------------------------------
# [optional] Default: 127.0.0.1 (use 0.0.0.0 for Docker / cloud).
# Leave unset to keep the default; the Docker image sets 0.0.0.0 automatically.
# OPENHUMAN_CORE_HOST=
# [optional] Default: 7788
OPENHUMAN_CORE_PORT=7788
# [optional] Default: http://127.0.0.1:7788/rpc
OPENHUMAN_CORE_RPC_URL=http://127.0.0.1:7788/rpc
# [optional] Run mode: child (default, spawns sidecar) | inprocess
OPENHUMAN_CORE_RUN_MODE=child
# [optional] Override path to openhuman core binary (leave blank for auto-detection)
OPENHUMAN_CORE_BIN=
# [optional] Explicit .env path for `openhuman serve` / `openhuman run` (loaded before the server starts).
# Must be set in the parent environment (exported in your shell or service manager). It is read before
# any dotenv file is loaded, so defining OPENHUMAN_DOTENV_PATH inside a .env file cannot select that file.
# OPENHUMAN_DOTENV_PATH=
# ---------------------------------------------------------------------------
# Config overrides (override config.toml values at runtime)
# ---------------------------------------------------------------------------
# [optional] Default model to use
OPENHUMAN_MODEL=
# [optional] Workspace directory (default: ~/.openhuman or ~/.openhuman-staging when OPENHUMAN_APP_ENV=staging)
OPENHUMAN_WORKSPACE=
# [optional] Default: 0.7
OPENHUMAN_TEMPERATURE=0.7
# [optional] Skill + agent tool execution timeout in seconds (default 120, max 3600)
# OPENHUMAN_TOOL_TIMEOUT_SECS=
# ---------------------------------------------------------------------------
# Runtime flags
# ---------------------------------------------------------------------------
# [optional] Default: 0
OPENHUMAN_BROWSER_ALLOW_ALL=0
# [optional] Default: 0
OPENHUMAN_LOG_PROMPTS=0
# [optional] Enable reasoning mode
OPENHUMAN_REASONING_ENABLED=
# ---------------------------------------------------------------------------
# Web search
# ---------------------------------------------------------------------------
# Web search is always enabled — no opt-in flag. Configure result budgets below.
# [optional] Default: 5
OPENHUMAN_WEB_SEARCH_MAX_RESULTS=5
# [optional] Default: 10
OPENHUMAN_WEB_SEARCH_TIMEOUT_SECS=10
# ---------------------------------------------------------------------------
# Proxy
# ---------------------------------------------------------------------------
# [optional] Default: false
OPENHUMAN_PROXY_ENABLED=false
# [optional] HTTP proxy URL
OPENHUMAN_HTTP_PROXY=
# [optional] HTTPS proxy URL
OPENHUMAN_HTTPS_PROXY=
# [optional] Catch-all proxy URL
OPENHUMAN_ALL_PROXY=
# [optional] Comma-separated hosts to bypass proxy
OPENHUMAN_NO_PROXY=
# [optional] Proxy scope
OPENHUMAN_PROXY_SCOPE=
# [optional] Comma-separated services to proxy
OPENHUMAN_PROXY_SERVICES=
# ---------------------------------------------------------------------------
# Local AI model tier
# ---------------------------------------------------------------------------
# [optional] Override selected model tier: low, medium, high
# Applies the corresponding preset at config load time (overrides config.toml).
OPENHUMAN_LOCAL_AI_TIER=
# ---------------------------------------------------------------------------
# Local AI binary overrides
# ---------------------------------------------------------------------------
# [optional] Override path to whisper binary
WHISPER_BIN=
# [optional] Override path to piper binary
PIPER_BIN=
# [optional] Override path to ollama binary
OLLAMA_BIN=
# ---------------------------------------------------------------------------
# Telegram managed login
# ---------------------------------------------------------------------------
# [optional] Bot username for managed Telegram DM linking (default: openhuman_bot)
OPENHUMAN_TELEGRAM_BOT_USERNAME=openhuman_bot
# ---------------------------------------------------------------------------
# Skills
# ---------------------------------------------------------------------------
# [optional] Override skills registry URL.
# Supports remote HTTP URLs and local file paths for development:
# SKILLS_REGISTRY_URL=https://example.com/registry.json (remote)
# SKILLS_REGISTRY_URL=/path/to/openhuman-skills/skills/registry.json (local)
# When set to a local path, the registry is read directly from disk on every
# call (no caching), so changes are picked up immediately.
SKILLS_REGISTRY_URL=
# [optional] Local skills source directory for development.
# Points to the built skills directory (the folder containing per-skill subdirs
# with manifest.json + index.js). When set, this takes highest priority for
# skill discovery and install will copy from this directory instead of downloading.
# Example: SKILLS_LOCAL_DIR=/Users/you/work/openhuman-skills/skills
SKILLS_LOCAL_DIR=
# [optional] Enable sync-derived user working memory extraction (default: true).
# Set to false to disable persisting `working.user.*` docs from skill sync payloads.
OPENHUMAN_SKILLS_WORKING_MEMORY_ENABLED=true
# ---------------------------------------------------------------------------
# Error Reporting (Sentry)
# ---------------------------------------------------------------------------
# [optional] Sentry DSN for Rust core error reporting (no PII is sent)
OPENHUMAN_SENTRY_DSN=
# [optional] Short git SHA baked into the Sentry release tag
# (`openhuman@<version>+<sha>`) via `option_env!("OPENHUMAN_BUILD_SHA")`.
# CI sets this automatically; leave blank locally (release tag falls back
# to `openhuman@<version>`).
OPENHUMAN_BUILD_SHA=
# [optional] Default: true — set to false to disable anonymized analytics & crash reports
OPENHUMAN_ANALYTICS_ENABLED=true
# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
# [optional] Default: info
RUST_LOG=info
# [optional] Default: 0 — deliberately set to 1 in this example to get full
# backtraces during development; change to 0 (or leave unset) in production.
RUST_BACKTRACE=1
# ---------------------------------------------------------------------------
# Testing (do not set in production)
# ---------------------------------------------------------------------------
# [optional] Enable mock service mode
# OPENHUMAN_SERVICE_MOCK=0
# [optional] Path to mock state file
# OPENHUMAN_SERVICE_MOCK_STATE_FILE=