-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
83 lines (76 loc) · 2.28 KB
/
utils.py
File metadata and controls
83 lines (76 loc) · 2.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import json
import os
import time
from functools import wraps
from typing import List

from dotenv import load_dotenv

from clients import openai_client
load_dotenv()
def get_embedding(query_text, model="text-embedding-3-large", dimensions=256):
    """Embed *query_text* with the OpenAI embeddings API.

    Newlines are replaced with spaces before the request is sent. Returns the
    embedding vector (a list of floats) of length *dimensions*.
    """
    cleaned = query_text.replace("\n", " ")
    response = openai_client.embeddings.create(
        input=[cleaned],
        model=model,
        dimensions=dimensions,
    )
    return response.data[0].embedding
def with_timing(func):
    """Decorator that prints the wall-clock run time of *func* when the
    DEBUG environment variable is "1"; otherwise returns *func* unchanged.

    NOTE: the DEBUG check happens once, at decoration (import) time, so
    toggling DEBUG afterwards has no effect on already-decorated functions.
    """
    if os.getenv("DEBUG") != "1":
        return func

    # wraps preserves func.__name__ / __doc__ on the wrapper so decorated
    # functions keep their identity for logging and introspection.
    @wraps(func)
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and high-resolution, unlike time.time(),
        # which can jump if the system clock is adjusted mid-call.
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        end_time = time.perf_counter()
        print(f"[TIMING] '{func.__name__}' executed in {end_time - start_time:.4f} seconds")
        return result
    return wrapper
def system_prompt(func):
    """Decorator: wrap a function that returns prompt text so it instead
    returns an OpenAI chat message dict with role "system".

    The wrapped function's string return value becomes the "text" field of
    the single content part.
    """
    # wraps keeps the original function's name/docstring on the wrapper.
    @wraps(func)
    def wrapper(*args, **kwargs):
        text = func(*args, **kwargs)
        return {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": text
                }
            ]
        }
    return wrapper
def user_prompt(func):
    """Decorator: wrap a function that returns prompt text so it instead
    returns an OpenAI chat message dict with role "user".

    The wrapped function's string return value becomes the "text" field of
    the single content part.
    """
    # wraps keeps the original function's name/docstring on the wrapper.
    @wraps(func)
    def wrapper(*args, **kwargs):
        text = func(*args, **kwargs)
        return {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": text
                }
            ]
        }
    return wrapper
def openai_json_response(messages: List, model="gpt-4o-mini", temp=1, max_tokens=1024):
    """Run a chat completion in JSON mode and return the parsed response.

    *messages* is the chat message list; the model is forced to emit a JSON
    object via response_format, which is then decoded into a Python dict.
    """
    completion = openai_client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temp,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        response_format={"type": "json_object"},
    )
    raw = completion.choices[0].message.content
    return json.loads(raw)
def openai_stream(messages: List, model="gpt-4o-mini", temp=1, max_tokens=1024):
    """Start a streaming chat completion and return the stream iterator.

    The caller is responsible for iterating the returned stream of chunks.
    """
    return openai_client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temp,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stream=True,
    )