Commit 9f44d66

Merge pull request #10 from rishiraj/no-llm-dep
remove llm provider dependency
2 parents: 6077d94 + 1a070fb

3 files changed: 52 additions & 94 deletions

README.md

Lines changed: 25 additions & 17 deletions
````diff
@@ -123,30 +123,38 @@ fr.compare(url)
 
 ### Generating Text with LLMs
 
-FireRequests supports generating responses from LLMs like OpenAI’s and Google’s generative models in parallel batches. This currently doesn't work in Colab.
+FireRequests allows you to run LLM API calls (like OpenAI or Google) in parallel batches using a decorator. This keeps the library lightweight and lets users supply their own logic for calling APIs. This approach currently doesn't work in Colab.
 
 ```python
 from firerequests import FireRequests
 
 # Initialize FireRequests
 fr = FireRequests()
 
-# Set parameters
-provider = "openai"
-model = "gpt-4o-mini"
-system_prompt = "Provide concise answers."
-user_prompts = ["What is AI?", "Explain quantum computing.", "What is Bitcoin?", "Explain neural networks."]
-parallel_requests = 2
-
-# Generate responses
-responses = fr.generate(
-    provider=provider,
-    model=model,
-    system_prompt=system_prompt,
-    user_prompts=user_prompts,
-    parallel_requests=parallel_requests
-)
-
+# Use the decorator to define your own prompt function
+@fr.op(max_reqs=2, prompts=[
+    "What is AI?",
+    "Explain quantum computing.",
+    "What is Bitcoin?",
+    "Explain neural networks."
+])
+def generate(system: str = "Provide concise answers.", prompt: str = ""):
+    # You can use OpenAI, Google, or any other LLM API here
+    from openai import OpenAI
+    import os
+
+    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    response = client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "system", "content": system},
+            {"role": "user", "content": prompt}
+        ]
+    )
+    return response.choices[0].message.content
+
+# Call your decorated function
+responses = generate()
 print(responses)
 ```
````
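
Since this PR drops the built-in OpenAI and Google call paths, the decorated function is where you wire in whichever SDK you prefer. As a hedged illustration, here is the same `@fr.op` pattern from the new README snippet adapted to Google's Gemini SDK, reusing the call style of the removed `call_google_sync` helper; the model name `gemini-1.5-flash` is an assumption, and `google-generativeai` must now be installed separately because it is no longer a FireRequests dependency.

```python
# Sketch: the @fr.op decorator with Google's Gemini SDK instead of OpenAI.
# The genai calls mirror the removed call_google_sync helper; the model name
# "gemini-1.5-flash" is an assumption, and google-generativeai is no longer
# installed by FireRequests, so add it to your own environment first.
import os

import google.generativeai as genai
from firerequests import FireRequests

fr = FireRequests()

@fr.op(max_reqs=2, prompts=["What is AI?", "Explain quantum computing."])
def generate(system: str = "Provide concise answers.", prompt: str = "") -> str:
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",  # assumed model name; any Gemini model works
        system_instruction=system,
    )
    chat = model.start_chat(history=[])
    return chat.send_message(prompt).text

responses = generate()
print(responses)
```

Any blocking client call works in the decorated function; `op` simply runs it in parallel threads over the supplied prompts.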

firerequests/main.py

Lines changed: 26 additions & 74 deletions
```diff
@@ -286,86 +286,38 @@ def compare(self, url: str, filename: Optional[str] = None):
         except Exception as e:
             print(f"Error in compare: {e}")
 
-    def call_openai_sync(self, model: str, system_prompt: str, user_prompt: str) -> str:
-        from openai import OpenAI
-        client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
-        completion = client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt}
-            ]
-        )
-        return completion.choices[0].message.content
-
-    async def call_openai(self, model: str, system_prompt: str, user_prompt: str) -> str:
-        return await asyncio.to_thread(self.call_openai_sync, model, system_prompt, user_prompt)
-
-    def call_google_sync(self, model: str, system_prompt: str, user_prompt: str) -> str:
-        import google.generativeai as genai
-        genai.configure(api_key=os.environ["GEMINI_API_KEY"])
-
-        generation_config = {
-            "temperature": 1,
-            "top_p": 0.95,
-            "top_k": 40,
-            "max_output_tokens": 8192,
-            "response_mime_type": "text/plain",
-        }
-
-        model_instance = genai.GenerativeModel(
-            model_name=model,
-            generation_config=generation_config,
-            system_instruction=system_prompt,
-        )
-
-        chat_session = model_instance.start_chat(history=[])
-        response = chat_session.send_message(user_prompt)
-        return response.text
-
-    async def call_google(self, model: str, system_prompt: str, user_prompt: str) -> str:
-        return await asyncio.to_thread(self.call_google_sync, model, system_prompt, user_prompt)
-
-    async def generate_batch(
-        self, provider: str, model: str, system_prompt: str, user_prompts: List[str]
-    ) -> List[str]:
-        tasks = []
-        for user_prompt in user_prompts:
-            if provider.lower() == "openai":
-                tasks.append(self.call_openai(model, system_prompt, user_prompt))
-            elif provider.lower() == "google":
-                tasks.append(self.call_google(model, system_prompt, user_prompt))
-            else:
-                raise ValueError("Unsupported provider. Choose either 'openai' or 'google'.")
-
-        responses = await asyncio.gather(*tasks)
-        return responses
-
-    def generate(
-        self, provider: str, model: str, system_prompt: str, user_prompts: List[str], parallel_requests: int = 10
-    ) -> List[str]:
+    def op(self, max_reqs: int = 10, prompts: Optional[List[str]] = None):
         """
-        Generates responses for the given list of user prompts in parallel batches.
+        Decorator to parallelize a user-defined prompt function over a list of prompts.
 
         Args:
-            provider (str): The API provider to use, either "openai" or "google".
-            model (str): The model to use for generating responses.
-            system_prompt (str): The system message prompt to include in each request.
-            user_prompts (List[str]): List of user messages for generation.
-            parallel_requests (int): Number of parallel requests to make.
+            max_reqs (int): Maximum number of parallel tasks.
+            prompts (List[str]): Prompts to process.
 
         Returns:
-            List[str]: List of generated responses corresponding to each user prompt.
+            Decorated function executed in parallel using asyncio.
         """
-        async def generate_all():
-            all_responses = []
-            for i in range(0, len(user_prompts), parallel_requests):
-                batch_prompts = user_prompts[i:i + parallel_requests]
-                batch_responses = await self.generate_batch(provider, model, system_prompt, batch_prompts)
-                all_responses.extend(batch_responses)
-            return all_responses
-
-        return self.loop.run_until_complete(generate_all())
+        def decorator(func):
+            async def run_batch(prompts_batch):
+                tasks = [asyncio.to_thread(func, prompt=prompt) for prompt in prompts_batch]
+                return await asyncio.gather(*tasks)
+
+            def wrapper(*args, **kwargs):
+                if prompts is None:
+                    raise ValueError("You must pass a list of prompts to the decorator.")
+                results = []
+
+                async def run_all():
+                    for i in range(0, len(prompts), max_reqs):
+                        batch = prompts[i:i + max_reqs]
+                        batch_results = await run_batch(batch)
+                        results.extend(batch_results)
+                    return results
+
+                return self.loop.run_until_complete(run_all())
+
+            return wrapper
+        return decorator
 
 def main():
     fire.Fire(FireRequests)
```
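
For readers skimming the diff, the new `op` decorator boils down to fanning a blocking function out with `asyncio.to_thread` and awaiting each fixed-size batch with `asyncio.gather`. Below is a minimal, self-contained sketch of that pattern; the names `parallel_over` and `answer` are illustrative and not part of the library, and `asyncio.run` stands in for the library's `self.loop.run_until_complete`.

```python
# Minimal sketch of the batching pattern behind FireRequests.op (illustrative
# names only): run a blocking function over prompts in fixed-size parallel batches.
import asyncio
from typing import Callable, List

def parallel_over(prompts: List[str], max_reqs: int = 10):
    def decorator(func: Callable[..., str]):
        async def run_batch(batch: List[str]) -> List[str]:
            # Each blocking call gets its own worker thread; the batch is awaited together.
            tasks = [asyncio.to_thread(func, prompt=p) for p in batch]
            return await asyncio.gather(*tasks)

        def wrapper() -> List[str]:
            async def run_all() -> List[str]:
                results: List[str] = []
                # Process the prompt list in chunks of at most max_reqs.
                for i in range(0, len(prompts), max_reqs):
                    results.extend(await run_batch(prompts[i:i + max_reqs]))
                return results
            # asyncio.run stands in for self.loop.run_until_complete in the library.
            return asyncio.run(run_all())

        return wrapper
    return decorator

@parallel_over(["2 + 2?", "Capital of France?"], max_reqs=2)
def answer(prompt: str = "") -> str:
    return f"echo: {prompt}"  # stand-in for a real LLM API call

print(answer())  # ['echo: 2 + 2?', 'echo: Capital of France?']
```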

requirements.txt

Lines changed: 1 addition & 3 deletions
```diff
@@ -3,6 +3,4 @@ aiofiles
 requests
 nest_asyncio
 tqdm
-fire
-google-generativeai
-openai
+fire
```
