-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy path.env.example
More file actions
117 lines (86 loc) · 3.66 KB
/
.env.example
File metadata and controls
117 lines (86 loc) · 3.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
# Symbiont OSS Environment Configuration
# Copy this file to .env and replace the placeholder values with your actual credentials
# ==============================================================================
# API KEYS (REQUIRED)
# ==============================================================================
# OpenRouter API Key - Get from https://openrouter.ai/keys
OPENROUTER_API_KEY=your_openrouter_api_key_here
# OpenAI API Key - Needed for RAG functionality when using OpenAI embeddings
# (leave empty if using local models such as Ollama or LocalAI)
OPENAI_API_KEY=your_openai_api_key_here
# OpenAI API Base URL - Use for OpenAI-compatible endpoints like Ollama
# Examples:
# OpenAI: https://api.openai.com/v1
# Ollama: http://localhost:11434/v1
# LocalAI: http://localhost:8080/v1
OPENAI_API_BASE_URL=https://api.openai.com/v1
# ==============================================================================
# LLM & EMBEDDING MODEL CONFIGURATION
# ==============================================================================
# Chat model for general AI operations
# Examples:
# OpenAI: gpt-4, gpt-3.5-turbo
# Ollama: llama2, codellama, mistral
# LocalAI: your-model-name
CHAT_MODEL=gpt-3.5-turbo
# Embedding provider: "ollama" (local) or "openai" (cloud)
# Auto-detected from URL/API key if not set
EMBEDDING_PROVIDER=
# Embedding model for RAG functionality
# Examples:
# OpenAI: text-embedding-3-small, text-embedding-3-large
# Ollama: nomic-embed-text, all-minilm
EMBEDDING_MODEL=text-embedding-3-small
# Embedding API Base URL (can be different from chat API)
# Leave empty to use same as OPENAI_API_BASE_URL
EMBEDDING_API_BASE_URL=
# Embedding API Key (can be different from OpenAI API key)
# Leave empty to use OPENAI_API_KEY
EMBEDDING_API_KEY=
# ==============================================================================
# DATABASE CONFIGURATION
# ==============================================================================
# PostgreSQL connection string for main database
DATABASE_URL=postgresql://symbiont:password@localhost:5432/symbiont
# Redis connection string for caching and session management
REDIS_URL=redis://localhost:6379
# Qdrant vector database URL for RAG functionality
QDRANT_URL=http://localhost:6333
# ==============================================================================
# APPLICATION CONFIGURATION
# ==============================================================================
# Logging level: error, warn, info, debug, trace
LOG_LEVEL=info
# API server port
API_PORT=8080
# Application environment: development, staging, production
ENVIRONMENT=development
# ==============================================================================
# STORAGE PATHS
# ==============================================================================
# Directory for agent context and knowledge storage
CONTEXT_STORAGE_PATH=./agent_storage
# Git repository clone base path
GIT_CLONE_BASE_PATH=./temp_repos
# Backup directory for workflow operations
BACKUP_DIRECTORY=./backups
# ==============================================================================
# OPTIONAL CONFIGURATION OVERRIDES
# ==============================================================================
# Qdrant collection name for vector storage
QDRANT_COLLECTION_NAME=agent_knowledge
# Vector dimension for embeddings - must match the embedding model
# (1536 for text-embedding-3-small, 3072 for text-embedding-3-large)
VECTOR_DIMENSION=1536
# Maximum context size in MB
MAX_CONTEXT_SIZE_MB=100
# API request timeout in seconds
API_TIMEOUT_SECONDS=60
# Maximum tokens for AI responses
MAX_TOKENS=4000
# AI model temperature (0.0-1.0); lower values give more deterministic output
TEMPERATURE=0.1
# Default AI model for OpenRouter
OPENROUTER_MODEL=anthropic/claude-3.5-sonnet
# Enable/disable basic features (true/false)
ENABLE_COMPRESSION=true
ENABLE_BACKUPS=true
ENABLE_SAFETY_CHECKS=true