-
Notifications
You must be signed in to change notification settings - Fork 76
Expand file tree
/
Copy path.env.example
More file actions
57 lines (48 loc) · 1.53 KB
/
.env.example
File metadata and controls
57 lines (48 loc) · 1.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# Tokenizer
TOKENIZER_MODEL=
# LLM
# Supports different backends: http_api, openai_api, azure_openai_api, ollama_api, ollama, huggingface, tgi, sglang, tensorrt, vllm
# http_api / openai_api
SYNTHESIZER_BACKEND=openai_api
SYNTHESIZER_MODEL=gpt-4o-mini
SYNTHESIZER_BASE_URL=
SYNTHESIZER_API_KEY=
TRAINEE_BACKEND=openai_api
TRAINEE_MODEL=gpt-4o-mini
TRAINEE_BASE_URL=
TRAINEE_API_KEY=
# azure_openai_api
# SYNTHESIZER_BACKEND=azure_openai_api
# The following is the same as your "Deployment name" in Azure
# SYNTHESIZER_MODEL=<your-deployment-name>
# SYNTHESIZER_BASE_URL=https://<your-resource-name>.openai.azure.com/openai/deployments/<your-deployment-name>/chat/completions
# SYNTHESIZER_API_KEY=
# SYNTHESIZER_API_VERSION=<api-version>
# # ollama_api
# SYNTHESIZER_BACKEND=ollama_api
# SYNTHESIZER_MODEL=gemma3
# SYNTHESIZER_BASE_URL=http://localhost:11434
#
# Note: TRAINEE with ollama_api backend is not supported yet as ollama_api does not support logprobs.
# # huggingface
# SYNTHESIZER_BACKEND=huggingface
# SYNTHESIZER_MODEL=Qwen/Qwen2.5-0.5B-Instruct
#
# TRAINEE_BACKEND=huggingface
# TRAINEE_MODEL=Qwen/Qwen2.5-0.5B-Instruct
# # sglang
# SYNTHESIZER_BACKEND=sglang
# SYNTHESIZER_MODEL=Qwen/Qwen2.5-0.5B-Instruct
# SYNTHESIZER_TP_SIZE=1
# SYNTHESIZER_NUM_GPUS=1
# TRAINEE_BACKEND=sglang
# TRAINEE_MODEL=Qwen/Qwen2.5-0.5B-Instruct
# TRAINEE_TP_SIZE=1
# TRAINEE_NUM_GPUS=1
# # vllm
# SYNTHESIZER_BACKEND=vllm
# SYNTHESIZER_MODEL=Qwen/Qwen2.5-0.5B-Instruct
# SYNTHESIZER_NUM_GPUS=1
# TRAINEE_BACKEND=vllm
# TRAINEE_MODEL=Qwen/Qwen2.5-0.5B-Instruct
# TRAINEE_NUM_GPUS=1