-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathenv.example
More file actions
121 lines (87 loc) · 4.49 KB
/
env.example
File metadata and controls
121 lines (87 loc) · 4.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# env.example — template environment configuration for this subnet's nodes.
# Copy/rename and fill in the blank values before launching; every setting is a
# plain exported environment variable read by the deployment scripts.
export SYSTEM_MODE=production
# ____________ Workers and Coordinators Deployment Configuration: ________________
# Bittensor wallet names used by this node.
export COLDKEY_NAME=default
export HOTKEY_NAME=default
# Role of this node. NOTE(review): only "miner" is shown here — confirm the
# accepted values (e.g. "validator") against the launcher that reads TYPE.
export TYPE=miner
# Chain network to join ("test" here; change for production deployments).
export NETWORK=test
# Port and bind address for this node. 0.0.0.0 binds all interfaces.
export PORT=60000
export IP=0.0.0.0
# Enables DEBUG logging
export DEBUG_MODE=true
# OpenAI key is always required for Default Settings and Embedding Model usage. Please enter:
export OPENAI_API_KEY=
# ____________ WANDB: ________________
# WandB Key is only required for coordinators.
# Get a free key from the Weights & Biases website at: https://wandb.ai/site
export WANDB_API_KEY=
# 0 disables Weights & Biases logging. NOTE(review): the name is spelled
# WAND_ENABLED (no "B") — if the consuming code expects WANDB_ENABLED this is
# a typo, but renaming it here would break existing setups; confirm against
# the code that reads it before changing.
export WAND_ENABLED=0
# __________ Local Chain ____________
# Override this if you want to run a local chain
export LOCAL_NETUID=
export LOCAL_CHAIN=
# ____________ OpenAI Configuration: ________________
# OpenAI is the default LLM provider for all operations.
# NOTE(review): this section names GPT-5.2 as the default while the "Select LLM
# Override" section below says GPT-4o — confirm which is current and make the
# two comments agree.
# To override your OpenAI model choice, uncomment the line below, then proceed to selecting a model. For other override options, see "Select LLM Override" below.
#export LLM_TYPE_OVERRIDE=openai
# Enter a model below. See all options at: https://platform.openai.com/docs/models
# export OPENAI_MODEL=gpt-5.2
# ____________ Embeddings Model ________________
# DO NOT CHANGE. Ensures compatibility between coordinators and workers
export OPENAI_EMBEDDINGS_MODEL=text-embedding-3-large
# ____________ Select LLM Override ________________
# Optionally replace the default OpenAI provider with an override LLM (Not Recommended).
# Coordinators: API used to generate full convo tags and validate worker tags.
# Workers: API used to generate window tags.
# Note that the Llama models can be used through Groq and the Claude models can be used through Anthropic.
# Uncomment exactly one of the overrides below:
#export LLM_TYPE_OVERRIDE=groq
#export LLM_TYPE_OVERRIDE=anthropic
#export LLM_TYPE_OVERRIDE=openrouter
# NOTE(review): the CHUTES section below implies "chutes" is also a valid
# override value, but no LLM_TYPE_OVERRIDE=chutes line is listed — confirm.
# Continue below for additional configuration based on your override selection(s)
# ____________ GROQ Configuration: ________________
# *** Below fields required if you chose LLM_TYPE_OVERRIDE=groq -- https://groq.com/ ***
export GROQ_API_KEY=
# Enter a model below. See all options (use model ID): https://console.groq.com/docs/models
export GROQ_MODEL=llama3-8b-8192
# DO NOT CHANGE - required if LLM_TYPE_OVERRIDE=groq
export GROQ_DIRECT_CALL=1
# ____________ ANTHROPIC Configuration: ________________
# *** Below fields required if you chose LLM_TYPE_OVERRIDE=anthropic -- https://claude.ai/ ***
export ANTHROPIC_API_KEY=
# Enter a model below. See all options (use "Latest 1P API model name"): https://docs.anthropic.com/en/docs/about-claude/models#model-names
export ANTHROPIC_MODEL=claude-3-sonnet-20240229
# ____________ OPENROUTER Configuration: ________________
# *** Below fields required if you chose LLM_TYPE_OVERRIDE=openrouter -- https://openrouter.ai/ ***
export OPENROUTER_API_KEY=
# Enter a model below. See all options: https://openrouter.ai/models
export OPENROUTER_MODEL=deepseek/deepseek-chat
# Optional: Preferred provider (e.g., chutes)
export OPENROUTER_PROVIDER_PREFERENCE=chutes
# ____________ CHUTES Configuration: ________________
# *** Below fields required if you chose LLM_TYPE_OVERRIDE=chutes -- https://chutes.ai/ ***
export CHUTES_API_KEY=
# Override the default model. See all options: https://chutes.ai/models
# export CHUTES_MODEL=deepseek-ai/DeepSeek-V3
# Override the default embedding model. See all options: https://chutes.ai/models
# export CHUTES_EMBEDDING_URL=https://chutes-qwen-qwen3-embedding-8b.chutes.ai/v1/embeddings
# ____________ DB Read/Write Configuration: ____________
# For Validators. Read from api.conversations.xyz
export CGP_API_READ_HOST=https://api.conversations.xyz
export CGP_API_READ_PORT=443
# For Validators. Write to db.conversations.xyz
export CGP_API_WRITE_HOST=https://db.conversations.xyz
export CGP_API_WRITE_PORT=443
# For Validators. Used for local DB Configuration.
# If you want to run a local API you can adjust the following variables:
export START_LOCAL_CGP_API=false
export LOCAL_CGP_API_PORT=8000
# You will also need to uncomment the lines below.
# See "Validating with a Custom Conversation Server" in the Readme.md for further information.
# NOTE: $LOCAL_CGP_API_PORT only expands if this file is sourced by a shell,
# not if it is consumed by a plain dotenv-style parser — confirm how it is loaded.
#export CGP_API_READ_HOST=http://localhost
#export CGP_API_READ_PORT=$LOCAL_CGP_API_PORT
# Only uncomment this for local testing
#export CGP_API_WRITE_HOST=http://localhost
#export CGP_API_WRITE_PORT=$LOCAL_CGP_API_PORT
# ____________ Debug Log: ____________
# Optional, Commented by default.
# Uncomment to set a path to log the conversation windows and tags you mine for analysis
# export SCORING_DEBUG_LOG=./scoring_debug.log