Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,8 @@ jobs:
needs: test
with:
template-repository-name: "lambda-feedback/chat-function-boilerplate"
      # Allow the developer to specify which environment variables are used by the deployed AWS Lambda. Defaults to mock values, which an admin can update later.
deployed-environment-variables: '["OPENAI_API_KEY","OPENAI_MODEL","GOOGLE_AI_API_KEY","GOOGLE_AI_MODEL"]'
permissions:
contents: read
id-token: write
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,8 @@ jobs:
needs: test
with:
template-repository-name: "lambda-feedback/chat-function-boilerplate"
      # Allow the developer to specify which environment variables are used by the deployed AWS Lambda. Defaults to mock values, which an admin can update later.
# deployed-environment-variables: '["OPENAI_API_KEY","OPENAI_MODEL","GOOGLE_AI_API_KEY","GOOGLE_AI_MODEL"]'
permissions:
contents: read
id-token: write
Expand Down
2 changes: 2 additions & 0 deletions src/agents/llm_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
load_dotenv()

class AzureLLMs:
def __init__(self, temperature: int = 0):
Expand Down
18 changes: 12 additions & 6 deletions src/agents/utils/parse_json_to_prompt.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
""" File not to be modified. This file contains the conversion logic between the agent API and the Lambda Feedback backend."""

from typing import List, Optional, Union, Dict

# questionSubmissionSummary type
Expand Down Expand Up @@ -85,13 +83,21 @@ def __init__(
class QuestionDetails:
def __init__(
self,
setNumber: Optional[int] = None,
setName: Optional[str] = None,
setDescription: Optional[str] = None,
questionNumber: Optional[int] = None,
questionTitle: Optional[str] = None,
questionGuidance: Optional[str] = None,
questionContent: Optional[str] = None,
durationLowerBound: Optional[int] = None,
durationUpperBound: Optional[int] = None,
parts: Optional[List[PartDetails]] = [],
):
self.setNumber = setNumber
self.setName = setName
self.setDescription = setDescription
self.questionNumber = questionNumber
self.questionTitle = questionTitle
self.questionGuidance = questionGuidance
self.questionContent = questionContent
Expand Down Expand Up @@ -161,7 +167,7 @@ def format_response_area_details(responseArea: ResponseAreaDetails, studentSumma
{submissionDetails}"""

def format_part_details(part: PartDetails, currentPart: CurrentPart, summary: List[StudentWorkResponseArea]) -> str:
if not part or not part.publishedResponseAreas:
if not part:
return ''

responseAreas = "\n".join(
Expand All @@ -187,9 +193,9 @@ def format_part_details(part: PartDetails, currentPart: CurrentPart, summary: Li
"""

questionDetails = f"""This is the question I am currently working on. I am currently working on Part ({convert_index_to_lowercase_letter(questionAccessInformation.currentPart.position)}). Below, you'll find its details, including the parts of the question, my responses for each response area, and the feedback I received. This information highlights my efforts and progress so far. Use this this information to inform your understanding about the question materials provided to me and my work on them.
Maths equations are in KaTex format, preserve them the same.

# Question: {questionInformation.questionTitle};
Maths equations are in KaTex format, preserve them the same. Use British English spellings.
{f'# Question Set {questionInformation.setNumber + 1}: {questionInformation.setName};' if questionInformation.setName and questionInformation.setNumber else ''}
# Question{f' {questionInformation.setNumber + 1}.{questionInformation.questionNumber + 1}' if questionInformation.setNumber and questionInformation.questionNumber else ''}: {questionInformation.questionTitle};
Guidance to Solve the Question: {questionInformation.questionGuidance or 'None'};
Description of Question: {questionInformation.questionContent};
Expected Time to Complete the Question: {f'{questionInformation.durationLowerBound} - {questionInformation.durationUpperBound} min;' if questionInformation.durationLowerBound and questionInformation.durationUpperBound else 'No specified duration.'}
Expand Down
10 changes: 6 additions & 4 deletions src/agents/utils/synthetic_conversation_generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,11 @@
try:
from ..student_agent.student_agent import invoke_student_agent
from .parse_json_to_prompt import parse_json_to_prompt
from ..base_agent import invoke_base_agent
from ..base_agent.base_agent import invoke_base_agent
except ImportError:
from src.agents.student_agent.student_agent import invoke_student_agent
from src.agents.utils.parse_json_to_prompt import parse_json_to_prompt
from src.agents.base_agent import invoke_base_agent
from src.agents.base_agent.base_agent import invoke_base_agent
import os


Expand Down Expand Up @@ -70,11 +70,11 @@ def generate_synthetic_conversations(raw_text: str, num_turns: int, student_agen
# Student starts
student_response = invoke_student_agent(message, conversation_history[:-1], summary, student_agent_type, question_response_details_prompt, conversation_id)
conversation_history.append({
"role": "assistant",
"role": "user",
"content": student_response["output"]
})
else:
tutor_response = invoke_tutor_agent(message, conversation_history[:-1], summary, conversational_style, question_response_details_prompt, conversation_id)
tutor_response = invoke_tutor_agent(message, conversation_history, summary, conversational_style, question_response_details_prompt, conversation_id)
conversation_history.append({
"role": "assistant",
"content": tutor_response["output"]
Expand All @@ -88,6 +88,8 @@ def generate_synthetic_conversations(raw_text: str, num_turns: int, student_agen
# Save Conversation
conversation_output = {
"conversation_id": conversation_id+"_"+student_agent_type+"_"+tutor_agent_type+"_synthetic",
"student_agent_type": student_agent_type,
"tutor_agent_type": tutor_agent_type,
"conversation": conversation_history
}
return conversation_output
Expand Down
Loading