diff --git a/.gitignore b/.gitignore
index 0f34269d..0244508f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,7 +11,7 @@ __pycache__/
.venv/
env/
venv/
-*.env*
+*.env
.env_example
diff --git a/prompts/code_prompts.py b/prompts/code_prompts.py
index 13a61ade..a3dfaebc 100644
--- a/prompts/code_prompts.py
+++ b/prompts/code_prompts.py
@@ -13,6 +13,10 @@
- PAPER_ALGORITHM_ANALYSIS_PROMPT: Focuses on algorithm extraction and clarifies implementation priorities
- PAPER_CONCEPT_ANALYSIS_PROMPT: Focuses on system architecture and highlights the concept-to-code mapping
- CODE_PLANNING_PROMPT: Integrates the two outputs above to produce a high-quality reproduction plan
+
+NEW: User requirement analysis prompts
+- REQUIREMENT_QUESTION_GENERATION_PROMPT: Generates guiding questions from the initial requirements
+- REQUIREMENT_SUMMARY_PROMPT: Generates a detailed requirement document from the Q&A
"""
# Paper to Code Workflow Prompts
diff --git a/prompts/requirement_analysis_prompts.py b/prompts/requirement_analysis_prompts.py
new file mode 100644
index 00000000..d8987826
--- /dev/null
+++ b/prompts/requirement_analysis_prompts.py
@@ -0,0 +1,71 @@
+"""
+Prompt templates for user requirement analysis
+
+Contains the prompt templates used by the requirement analysis agent, covering question generation and requirement summarization.
+"""
+
+# ========================================
+# Prompt templates for user requirement analysis
+# ========================================
+
+REQUIREMENT_QUESTION_GENERATION_PROMPT = """You are a professional requirement analysis expert, skilled at helping users refine project requirements through precise questions.
+
+Please generate 1-3 precise guiding questions based on the user's initial requirement description to help them provide more detailed information.
+
+User Initial Requirements:
+{user_input}
+
+Please generate a JSON-formatted question list, where each question contains the following fields:
+- category: Question category (such as "Functional Requirements", "Technology Selection", "Performance Requirements", "User Interface", "Deployment Environment", etc.)
+- question: Specific question content
+- importance: Importance level ("High", "Medium", "Low")
+- hint: Question hint or example (optional)
+
+Requirements:
+1. Questions should be highly targeted, grounded in the user's specific requirement scenario
+2. Cover key decision points for project implementation
+3. Avoid overly technical questions; keep them user-friendly
+4. Questions should be logically related to one another
+5. Ensure questions help users think about important details they might have missed
+
+Return the JSON result directly, without any additional text."""
+
+REQUIREMENT_SUMMARY_PROMPT = """You are a professional technical requirement analyst, skilled at converting user requirement descriptions into detailed technical specification documents.
+
+Please generate a detailed project requirement document based on the user's initial requirements and supplementary responses.
+
+User Initial Requirements:
+{initial_input}
+
+User Supplementary Responses:
+{answers_text}
+
+Please generate a concise requirement document focusing on the following core sections:
+
+## Project Overview
+Brief description of project's core goals and value proposition
+
+## Functional Requirements
+Detailed list of required features and functional modules:
+- Core functionalities
+- User interactions and workflows
+- Data processing requirements
+- Integration needs
+
+## Technical Architecture
+Recommended technical design including:
+- Technology stack and frameworks
+- System architecture design
+- Database and data storage solutions
+- API design considerations
+- Security requirements
+
+## Performance & Scalability
+- Expected user scale and performance requirements
+- Scalability considerations and constraints
+
+Requirements:
+1. Focus on what needs to be built and how to build it technically
+2. Be concise but comprehensive - avoid unnecessary implementation details
+3. Provide clear functional specifications and technical architecture guidance
+4. Consider project feasibility and technical complexity"""
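For orientation, the list below is roughly the shape of data that REQUIREMENT_QUESTION_GENERATION_PROMPT is expected to produce once parsed with json.loads(); the field names come from the prompt above, while the concrete questions are purely illustrative.

```python
# Illustrative only: the kind of question list the prompt above asks the LLM to return.
example_questions = [
    {
        "category": "Functional Requirements",
        "question": "Which social media platforms should the sentiment analysis cover?",
        "importance": "High",
        "hint": "e.g. X/Twitter only, or also Reddit and YouTube comments",
    },
    {
        "category": "Technology Selection",
        "question": "Do you have a preferred web framework or ML library?",
        "importance": "Medium",
        "hint": "e.g. Streamlit front end with a Hugging Face sentiment model",
    },
]
```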
diff --git a/ui/components.py b/ui/components.py
index 2fa57050..5fc277b5 100644
--- a/ui/components.py
+++ b/ui/components.py
@@ -6,7 +6,7 @@
import streamlit as st
import sys
-from typing import Dict, Any, Optional
+from typing import Dict, Any, Optional, List
from datetime import datetime
import json
@@ -791,9 +791,238 @@ def url_input_component(task_counter: int) -> Optional[str]:
return None
+def requirement_analysis_mode_selector(task_counter: int) -> str:
+ """
+ Requirement analysis mode selector
+
+ Args:
+ task_counter: Task counter
+
+ Returns:
+ Selected mode ("direct" or "guided")
+ """
+ st.markdown("""
+
+
+ 🎯 Choose Your Input Mode
+
+
+ Select how you'd like to provide your requirements
+
+
+ """, unsafe_allow_html=True)
+
+ mode = st.radio(
+ "Input mode:",
+ ["🚀 Direct Input", "🧠 Guided Analysis"],
+ index=0 if st.session_state.get("requirement_analysis_mode", "direct") == "direct" else 1,
+ horizontal=True,
+ help="Direct: Enter requirements directly. Guided: AI asks questions to help you clarify needs.",
+ key=f"req_mode_{task_counter}"
+ )
+
+ return "direct" if mode.startswith("🚀") else "guided"
+
+
+def requirement_questions_component(questions: List[Dict], task_counter: int) -> Dict[str, str]:
+ """
+ Requirement questions display and answer collection component
+
+ Args:
+ questions: Question list
+ task_counter: Task counter
+
+ Returns:
+ User answer dictionary
+ """
+ st.markdown("""
+
+
+ 📝 Help Us Understand Your Needs Better
+
+
+ Please answer the following questions to help us generate better code. You can skip any question.
+
+
+ """, unsafe_allow_html=True)
+
+ answers = {}
+
+ for i, question in enumerate(questions):
+ with st.expander(f"📋 {question.get('category', 'Question')} - {question.get('importance', 'Medium')} Priority", expanded=i < 3):
+ st.markdown(f"**{question['question']}**")
+
+ if question.get('hint'):
+ st.info(f"💡 {question['hint']}")
+
+ answer = st.text_area(
+ "Your answer:",
+ placeholder="Enter your answer here, or leave blank to skip...",
+ height=80,
+ key=f"answer_{i}_{task_counter}"
+ )
+
+ if answer and answer.strip():
+ answers[str(i)] = answer.strip()
+
+ st.markdown("---")
+ st.info(f"📊 You've answered {len(answers)} out of {len(questions)} questions.")
+
+ return answers
+
+
+def requirement_summary_component(summary: str, task_counter: int) -> bool:
+ """
+ Requirement summary display and confirmation component
+
+ Args:
+ summary: Requirement summary document
+ task_counter: Task counter
+
+ Returns:
+ Whether user confirms requirements
+ """
+ st.markdown("""
+
+
+ 📋 Detailed Requirements Summary
+
+
+ Based on your input, here's the detailed requirements document we've generated.
+
+
+ """, unsafe_allow_html=True)
+
+ # Display requirement summary
+ with st.expander("📖 View Detailed Requirements", expanded=True):
+ st.markdown(summary)
+
+ # Confirmation options
+ st.markdown("### 🎯 Next Steps")
+
+ col1, col2, col3 = st.columns(3)
+
+ with col1:
+ if st.button("✅ Looks Good, Proceed", type="primary", use_container_width=True, key=f"confirm_{task_counter}"):
+ # Mark requirements as confirmed, prepare to enter code generation
+ st.session_state.requirements_confirmed = True
+ return True
+
+ with col2:
+ if st.button("✏️ Edit Requirements", type="secondary", use_container_width=True, key=f"edit_{task_counter}"):
+ # Enter editing mode
+ st.session_state.requirement_analysis_step = "editing"
+ st.session_state.edit_feedback = ""
+ st.rerun()
+
+ with col3:
+ if st.button("🔄 Start Over", use_container_width=True, key=f"restart_{task_counter}"):
+ # Complete reset
+ st.session_state.requirement_analysis_mode = "direct"
+ st.session_state.requirement_analysis_step = "input"
+ st.session_state.generated_questions = []
+ st.session_state.user_answers = {}
+ st.session_state.detailed_requirements = ""
+ st.rerun()
+
+ return False
+
+
+def requirement_editing_component(current_requirements: str, task_counter: int) -> bool:
+ """
+ Interactive requirement editing component
+
+ Args:
+ current_requirements: Current requirement document content
+ task_counter: Task counter
+
+ Returns:
+ Whether editing is completed
+ """
+ st.markdown("""
+
+
+ ✏️ Edit Requirements Document
+
+
+ Review the current requirements and tell us how you'd like to modify them.
+
+
+ """, unsafe_allow_html=True)
+
+ # Display current requirements
+ st.markdown("### 📋 Current Requirements")
+ with st.expander("📖 View Current Requirements Document", expanded=True):
+ st.markdown(current_requirements)
+
+ # Ask for modification feedback
+ st.markdown("### 💭 How would you like to modify the requirements?")
+ st.markdown("Please describe your changes, additions, or corrections:")
+
+ edit_feedback = st.text_area(
+ "Your modification request:",
+ value=st.session_state.edit_feedback,
+ placeholder="For example:\n- Add user authentication feature\n- Change database from MySQL to PostgreSQL",
+ height=120,
+ key=f"edit_feedback_{task_counter}"
+ )
+
+ # Update session state
+ st.session_state.edit_feedback = edit_feedback
+
+ # Action buttons
+ col1, col2, col3 = st.columns(3)
+
+ with col1:
+ if st.button("🔄 Apply Changes", type="primary", use_container_width=True, key=f"apply_edit_{task_counter}"):
+ if edit_feedback.strip():
+ # Start requirement modification process
+ st.session_state.requirements_editing = True
+ st.info("🔄 Processing your modification request...")
+ return True
+ else:
+ st.warning("Please provide your modification request first.")
+
+ with col2:
+ if st.button("↩️ Back to Summary", type="secondary", use_container_width=True, key=f"back_summary_{task_counter}"):
+ # Go back to summary view
+ st.session_state.requirement_analysis_step = "summary"
+ st.session_state.edit_feedback = ""
+ st.rerun()
+
+ with col3:
+ if st.button("🔄 Start Over", use_container_width=True, key=f"restart_edit_{task_counter}"):
+ # Complete reset
+ st.session_state.requirement_analysis_mode = "direct"
+ st.session_state.requirement_analysis_step = "input"
+ st.session_state.generated_questions = []
+ st.session_state.user_answers = {}
+ st.session_state.detailed_requirements = ""
+ st.session_state.edit_feedback = ""
+ st.rerun()
+
+ return False
+
+
def chat_input_component(task_counter: int) -> Optional[str]:
"""
- Chat input component for coding requirements
+ Enhanced chat input component with requirement analysis support
Args:
task_counter: Task counter
@@ -801,6 +1030,20 @@ def chat_input_component(task_counter: int) -> Optional[str]:
Returns:
User coding requirements or None
"""
+ # Select input mode
+ selected_mode = requirement_analysis_mode_selector(task_counter)
+
+ # Update requirement analysis mode
+ st.session_state.requirement_analysis_mode = selected_mode
+
+ if selected_mode == "direct":
+ return _direct_input_component(task_counter)
+ else:
+ return _guided_analysis_component(task_counter)
+
+
+def _direct_input_component(task_counter: int) -> Optional[str]:
+ """Direct input mode component"""
st.markdown(
"""
- 💬 Describe Your Coding Requirements
+ 🚀 Direct Input Mode
- Tell us what you want to build. Our AI will analyze your requirements and generate a comprehensive implementation plan.
+ Describe your coding requirements directly. Our AI will analyze and generate a comprehensive implementation plan.
""",
@@ -852,7 +1095,7 @@ def chat_input_component(task_counter: int) -> Optional[str]:
The system should be scalable and production-ready, with proper error handling and documentation.""",
height=200,
help="Describe what you want to build, including functionality, technologies, and any specific requirements",
- key=f"chat_input_{task_counter}",
+ key=f"direct_input_{task_counter}",
)
if user_input and len(user_input.strip()) > 20: # Minimum length check
@@ -871,7 +1114,7 @@ def chat_input_component(task_counter: int) -> Optional[str]:
user_input,
height=100,
disabled=True,
- key=f"preview_{task_counter}",
+ key=f"direct_preview_{task_counter}",
)
return user_input.strip()
@@ -885,6 +1128,171 @@ def chat_input_component(task_counter: int) -> Optional[str]:
return None
+def _guided_analysis_component(task_counter: int) -> Optional[str]:
+ """Guided analysis mode component"""
+
+ # If requirements are already confirmed, return the detailed requirements directly
+ if st.session_state.get("requirements_confirmed", False):
+ detailed_requirements = st.session_state.get("detailed_requirements", "")
+ if detailed_requirements:
+ # Show confirmation message and return requirements for processing
+ st.success("🎉 Requirement analysis completed! Starting code generation...")
+ st.info("🔄 Automatically proceeding to code generation based on your confirmed requirements.")
+ return detailed_requirements
+
+ st.markdown(
+ """
+
+
+ 🧠 Guided Analysis Mode
+
+
+ Let our AI guide you through a series of questions to better understand your requirements.
+
+
+ """,
+ unsafe_allow_html=True,
+ )
+
+ # Check current step
+ current_step = st.session_state.get("requirement_analysis_step", "input")
+
+ if current_step == "input":
+ return _guided_input_step(task_counter)
+ elif current_step == "questions":
+ return _guided_questions_step(task_counter)
+ elif current_step == "summary":
+ return _guided_summary_step(task_counter)
+ elif current_step == "editing":
+ return _guided_editing_step(task_counter)
+ else:
+ # Reset to initial state
+ st.session_state.requirement_analysis_step = "input"
+ st.rerun()
+
+
+def _guided_input_step(task_counter: int) -> Optional[str]:
+ """Initial input step for guided mode"""
+ st.markdown("### 📝 Step 1: Tell us your basic idea")
+
+ user_input = st.text_area(
+ "What would you like to build? (Brief description is fine)",
+ placeholder="Example: A web app for sentiment analysis of social media posts",
+ height=120,
+ help="Don't worry about details - we'll ask specific questions next!",
+ key=f"guided_input_{task_counter}",
+ )
+
+ if user_input and len(user_input.strip()) > 10:
+ col1, col2 = st.columns([3, 1])
+
+ with col1:
+ st.info(f"📝 Initial idea captured: {len(user_input.split())} words")
+
+ with col2:
+ if st.button("🚀 Generate Questions", type="primary", use_container_width=True):
+ # Save initial input and enter question generation step
+ st.session_state.initial_requirement = user_input.strip()
+ st.session_state.requirement_analysis_step = "questions"
+ st.rerun()
+
+ elif user_input and len(user_input.strip()) <= 10:
+ st.warning("⚠️ Please provide at least a brief description (more than 10 characters)")
+
+ return None
+
+
+def _guided_questions_step(task_counter: int) -> Optional[str]:
+ """Question answering step for guided mode"""
+ st.markdown("### 🤔 Step 2: Answer questions to refine your requirements")
+
+ # Display initial requirements
+ with st.expander("📋 Your Initial Idea", expanded=False):
+ st.write(st.session_state.get("initial_requirement", ""))
+
+ # Check if questions have been generated
+ if not st.session_state.get("generated_questions"):
+ st.info("🔄 Generating personalized questions for your project...")
+
+ # Question generation runs asynchronously; set a flag here and let handle_guided_mode_processing() execute it on the next rerun
+ if st.button("🎯 Generate Questions Now", type="primary"):
+ st.session_state.questions_generating = True
+ st.rerun()
+ return None
+
+ # Display questions and collect answers
+ questions = st.session_state.generated_questions
+ answers = requirement_questions_component(questions, task_counter)
+ st.session_state.user_answers = answers
+
+ # Continue button
+ col1, col2, col3 = st.columns([1, 2, 1])
+
+ with col2:
+ if st.button("📋 Generate Detailed Requirements", type="primary", use_container_width=True):
+ st.session_state.requirement_analysis_step = "summary"
+ st.rerun()
+
+ with col1:
+ if st.button("⬅️ Back", use_container_width=True):
+ st.session_state.requirement_analysis_step = "input"
+ st.rerun()
+
+ return None
+
+
+def _guided_summary_step(task_counter: int) -> Optional[str]:
+ """Requirement summary step for guided mode"""
+ st.markdown("### 📋 Step 3: Review and confirm your detailed requirements")
+
+ # Check if detailed requirements have been generated
+ if not st.session_state.get("detailed_requirements"):
+ st.info("🔄 Generating detailed requirements based on your answers...")
+
+ if st.button("📋 Generate Requirements Now", type="primary"):
+ st.session_state.requirements_generating = True
+ st.rerun()
+ return None
+
+ # Display requirement summary and get confirmation
+ summary = st.session_state.detailed_requirements
+ confirmed = requirement_summary_component(summary, task_counter)
+
+ if confirmed:
+ # Return detailed requirements as final input
+ return summary
+
+ return None
+
+
+def _guided_editing_step(task_counter: int) -> Optional[str]:
+ """Requirement editing step for guided mode"""
+ st.markdown("### ✏️ Step 4: Edit your requirements")
+
+ # Get current requirements
+ current_requirements = st.session_state.get("detailed_requirements", "")
+ if not current_requirements:
+ st.error("No requirements found to edit. Please start over.")
+ st.session_state.requirement_analysis_step = "input"
+ st.rerun()
+ return None
+
+ # Show editing component
+ editing_requested = requirement_editing_component(current_requirements, task_counter)
+
+ if editing_requested:
+ # User has provided editing feedback, trigger requirement modification
+ st.session_state.requirements_editing = True
+ st.rerun()
+ return None
+
+ return None
+
+
def input_method_selector(task_counter: int) -> tuple[Optional[str], Optional[str]]:
"""
Input method selector
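For orientation, the guided flow added to chat_input_component() above is a small state machine over st.session_state.requirement_analysis_step. The sketch below condenses the transitions implemented by the _guided_* helpers; the event names are illustrative, since the real code mutates session state directly inside the button handlers.

```python
# Condensed sketch of the guided-mode step machine (event names are illustrative).
# Flow: "input" -> "questions" -> "summary" -> optionally "editing" -> back to "summary".
TRANSITIONS = {
    ("input", "generate_questions"): "questions",
    ("questions", "back"): "input",
    ("questions", "generate_requirements"): "summary",
    ("summary", "edit"): "editing",
    ("editing", "back_to_summary"): "summary",
    ("editing", "apply_changes"): "summary",  # after the modification workflow completes
}

def next_step(current: str, event: str) -> str:
    """Return the next step for a UI event; unknown events keep the current step."""
    return TRANSITIONS.get((current, event), current)

assert next_step("input", "generate_questions") == "questions"
assert next_step("editing", "apply_changes") == "summary"
```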
diff --git a/ui/handlers.py b/ui/handlers.py
index f3a03bc3..15da290e 100644
--- a/ui/handlers.py
+++ b/ui/handlers.py
@@ -587,6 +587,199 @@ def cleanup_temp_file(input_source: str, input_type: str):
pass
+async def handle_requirement_analysis_workflow(
+ user_input: str,
+ analysis_mode: str,
+ user_answers: Dict[str, str] = None
+) -> Dict[str, Any]:
+ """
+ Handle requirement analysis workflow
+
+ Args:
+ user_input: User initial requirements
+ analysis_mode: Analysis mode ("generate_questions" or "summarize_requirements")
+ user_answers: User answer dictionary
+
+ Returns:
+ Processing result dictionary
+ """
+ try:
+ # Import required modules
+ from workflows.agent_orchestration_engine import execute_requirement_analysis_workflow
+
+ # Create progress callback function
+ def update_progress(progress: int, message: str):
+ # Display progress in Streamlit
+ st.session_state.current_progress = progress
+ st.session_state.current_message = message
+
+ # Execute requirement analysis workflow
+ result = await execute_requirement_analysis_workflow(
+ user_input=user_input,
+ analysis_mode=analysis_mode,
+ user_answers=user_answers,
+ logger=None, # Can pass in logger
+ progress_callback=update_progress
+ )
+
+ return result
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "error": str(e),
+ "message": f"Requirement analysis workflow execution failed: {str(e)}"
+ }
+
+
+async def handle_requirement_modification_workflow(
+ current_requirements: str,
+ modification_feedback: str
+) -> Dict[str, Any]:
+ """
+ Handle requirement modification workflow
+
+ Args:
+ current_requirements: Current requirement document content
+ modification_feedback: User's modification requests and feedback
+
+ Returns:
+ Processing result dictionary
+ """
+ try:
+ # Import required modules
+ from workflows.agents.requirement_analysis_agent import RequirementAnalysisAgent
+ from utils.llm_utils import get_preferred_llm_class
+
+ # Create progress callback function
+ def update_progress(progress: int, message: str):
+ # Display progress in Streamlit
+ st.session_state.current_progress = progress
+ st.session_state.current_message = message
+
+ update_progress(10, "🔧 Initializing requirement modification agent...")
+
+ # Initialize RequirementAnalysisAgent
+ agent = RequirementAnalysisAgent()
+
+ # Initialize agent (LLM is initialized internally)
+ await agent.initialize()
+
+ update_progress(50, "✏️ Modifying requirements based on your feedback...")
+
+ # Modify requirements
+ result = await agent.modify_requirements(
+ current_requirements=current_requirements,
+ modification_feedback=modification_feedback
+ )
+
+ # Cleanup
+ await agent.cleanup()
+
+ update_progress(100, "✅ Requirements modification completed!")
+
+ return {
+ "status": "success",
+ "result": result,
+ "message": "Requirements modification completed successfully"
+ }
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "error": str(e),
+ "message": f"Requirements modification workflow execution failed: {str(e)}"
+ }
+
+
+def handle_guided_mode_processing():
+ """Handle asynchronous processing for guided mode"""
+ # Check if questions need to be generated
+ if st.session_state.get("questions_generating", False):
+ st.session_state.questions_generating = False
+
+ # Asynchronously generate questions
+ initial_req = st.session_state.get("initial_requirement", "")
+ if initial_req:
+ try:
+ # Use asynchronous processing to generate questions
+ result = run_async_task_simple(
+ handle_requirement_analysis_workflow(
+ user_input=initial_req,
+ analysis_mode="generate_questions"
+ )
+ )
+
+ if result["status"] == "success":
+ # Parse JSON result
+ import json
+ questions = json.loads(result["result"])
+ st.session_state.generated_questions = questions
+ else:
+ st.error(f"Question generation failed: {result.get('error', 'Unknown error')}")
+
+ except Exception as e:
+ st.error(f"Question generation exception: {str(e)}")
+
+ # Check if detailed requirements need to be generated
+ if st.session_state.get("requirements_generating", False):
+ st.session_state.requirements_generating = False
+
+ # Asynchronously generate detailed requirements
+ initial_req = st.session_state.get("initial_requirement", "")
+ user_answers = st.session_state.get("user_answers", {})
+
+ if initial_req:
+ try:
+ # Use asynchronous processing to generate requirement summary
+ result = run_async_task_simple(
+ handle_requirement_analysis_workflow(
+ user_input=initial_req,
+ analysis_mode="summarize_requirements",
+ user_answers=user_answers
+ )
+ )
+
+ if result["status"] == "success":
+ st.session_state.detailed_requirements = result["result"]
+ else:
+ st.error(f"Requirement summary generation failed: {result.get('error', 'Unknown error')}")
+
+ except Exception as e:
+ st.error(f"Requirement summary generation exception: {str(e)}")
+
+ # Check if requirements need to be edited
+ if st.session_state.get("requirements_editing", False):
+ st.session_state.requirements_editing = False
+ st.info("🔧 Starting requirement modification process...")
+
+ # Asynchronously modify requirements based on user feedback
+ current_requirements = st.session_state.get("detailed_requirements", "")
+ edit_feedback = st.session_state.get("edit_feedback", "")
+
+ if current_requirements and edit_feedback:
+ try:
+ # Use asynchronous processing to modify requirements
+ result = run_async_task_simple(
+ handle_requirement_modification_workflow(
+ current_requirements=current_requirements,
+ modification_feedback=edit_feedback
+ )
+ )
+
+ if result["status"] == "success":
+ st.session_state.detailed_requirements = result["result"]
+ st.session_state.requirement_analysis_step = "summary"
+ st.session_state.edit_feedback = ""
+ st.success("✅ Requirements updated successfully!")
+ st.rerun()
+ else:
+ st.error(f"Requirements modification failed: {result.get('error', 'Unknown error')}")
+
+ except Exception as e:
+ st.error(f"Requirements modification exception: {str(e)}")
+
+
def handle_start_processing_button(input_source: str, input_type: str):
"""
Handle start processing button click
@@ -665,6 +858,30 @@ def initialize_session_state():
st.session_state.enable_indexing = (
False # Default enable indexing functionality
)
+
+ # Requirement analysis related states
+ if "requirement_analysis_mode" not in st.session_state:
+ st.session_state.requirement_analysis_mode = "direct" # direct/guided
+ if "requirement_analysis_step" not in st.session_state:
+ st.session_state.requirement_analysis_step = "input"  # input/questions/summary/editing
+ if "generated_questions" not in st.session_state:
+ st.session_state.generated_questions = []
+ if "user_answers" not in st.session_state:
+ st.session_state.user_answers = {}
+ if "detailed_requirements" not in st.session_state:
+ st.session_state.detailed_requirements = ""
+ if "initial_requirement" not in st.session_state:
+ st.session_state.initial_requirement = ""
+ if "questions_generating" not in st.session_state:
+ st.session_state.questions_generating = False
+ if "requirements_generating" not in st.session_state:
+ st.session_state.requirements_generating = False
+ if "requirements_confirmed" not in st.session_state:
+ st.session_state.requirements_confirmed = False
+ if "edit_feedback" not in st.session_state:
+ st.session_state.edit_feedback = ""
+ if "requirements_editing" not in st.session_state:
+ st.session_state.requirements_editing = False
def cleanup_resources():
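handle_guided_mode_processing() above relies on run_async_task_simple, which is referenced but not added in this diff, so it presumably already exists in handlers.py. For readers following along outside the repo, a minimal stand-in under that assumption might look like this; the real helper may manage the event loop differently.

```python
import asyncio
from typing import Any, Coroutine

def run_async_task_simple(coro: Coroutine[Any, Any, Any]) -> Any:
    """Hypothetical stand-in for the existing helper: run a coroutine to
    completion from Streamlit's synchronous script context."""
    return asyncio.run(coro)

# Usage mirrors the calls above, e.g.:
#   result = run_async_task_simple(
#       handle_requirement_analysis_workflow(user_input=initial_req,
#                                            analysis_mode="generate_questions")
#   )
```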
diff --git a/ui/layout.py b/ui/layout.py
index 54185f55..5d22e447 100644
--- a/ui/layout.py
+++ b/ui/layout.py
@@ -18,6 +18,7 @@
initialize_session_state,
handle_start_processing_button,
handle_error_display,
+ handle_guided_mode_processing,
)
from .styles import get_main_styles
@@ -62,11 +63,31 @@ def render_main_content():
def render_input_interface():
"""Render input interface"""
- # Get input source and type
- input_source, input_type = input_method_selector(st.session_state.task_counter)
-
- # Processing button
- if input_source and not st.session_state.processing:
+ # Handle asynchronous operations for guided mode
+ handle_guided_mode_processing()
+
+ # Check if user is in guided analysis workflow
+ if (st.session_state.get("requirement_analysis_mode") == "guided" and
+ st.session_state.get("requirement_analysis_step") in ["questions", "summary", "editing"]):
+ # User is in guided analysis workflow, show chat input directly
+ from .components import chat_input_component
+ input_source = chat_input_component(st.session_state.task_counter)
+ input_type = "chat" if input_source else None
+ else:
+ # Normal flow: show input method selector
+ input_source, input_type = input_method_selector(st.session_state.task_counter)
+
+ # Processing button - Check if requirements are confirmed for guided mode
+ requirements_confirmed = st.session_state.get("requirements_confirmed", False)
+
+ # For guided mode, if requirements are confirmed, automatically start processing
+ if (st.session_state.get("requirement_analysis_mode") == "guided" and
+ requirements_confirmed and input_source and not st.session_state.processing):
+ # Automatically start processing for confirmed requirements
+ st.session_state.requirements_confirmed = False # Clear flag to prevent re-processing
+ handle_start_processing_button(input_source, input_type)
+ elif input_source and not st.session_state.processing and not requirements_confirmed:
+ # Only show Start Processing button if requirements are not already confirmed
if st.button("🚀 Start Processing", type="primary", use_container_width=True):
handle_start_processing_button(input_source, input_type)
@@ -75,7 +96,7 @@ def render_input_interface():
st.warning("⚠️ Do not refresh the page or close the browser during processing.")
elif not input_source:
- st.info("👆 Please upload a file or enter a URL to start processing.")
+ st.info("👆 Please upload a file, enter a URL, or describe your coding requirements to start processing.")
def render_sidebar():
diff --git a/workflows/agent_orchestration_engine.py b/workflows/agent_orchestration_engine.py
index 02926110..adc60e68 100644
--- a/workflows/agent_orchestration_engine.py
+++ b/workflows/agent_orchestration_engine.py
@@ -1069,6 +1069,61 @@ async def synthesize_code_implementation_agent(
return {"status": "error", "message": str(e)}
+async def run_requirement_analysis_agent(
+ user_input: str,
+ analysis_mode: str,
+ user_answers: Dict[str, str] = None,
+ logger=None
+) -> str:
+ """
+ Run requirement analysis Agent for question generation or requirement summarization
+
+ Args:
+ user_input: User's initial requirement description
+ analysis_mode: Analysis mode ("generate_questions" or "summarize_requirements")
+ user_answers: User's answer dictionary for questions (only used in summarize_requirements mode)
+ logger: Logger instance
+
+ Returns:
+ str: Generated question JSON string or detailed requirement document
+ """
+ try:
+ print(f"🧠 Starting requirement analysis Agent, mode: {analysis_mode}")
+ print(f"Input length: {len(user_input) if user_input else 0}")
+
+ if not user_input or user_input.strip() == "":
+ raise ValueError("User input cannot be empty")
+
+ # Import requirement analysis Agent
+ from workflows.agents.requirement_analysis_agent import RequirementAnalysisAgent
+
+ # Create requirement analysis Agent instance
+ async with RequirementAnalysisAgent(logger=logger) as req_agent:
+ if analysis_mode == "generate_questions":
+ # Generate guiding questions
+ print("📝 Generating guiding questions...")
+ questions = await req_agent.generate_guiding_questions(user_input)
+ return json.dumps(questions, ensure_ascii=False, indent=2)
+
+ elif analysis_mode == "summarize_requirements":
+ # Summarize detailed requirements
+ print("📋 Summarizing detailed requirements...")
+ if user_answers is None:
+ user_answers = {}
+ summary = await req_agent.summarize_detailed_requirements(
+ user_input, user_answers
+ )
+ return summary
+
+ else:
+ raise ValueError(f"Unsupported analysis mode: {analysis_mode}")
+
+ except Exception as e:
+ print(f"❌ Requirement analysis Agent execution failed: {e}")
+ print(f"Exception details: {type(e).__name__}: {str(e)}")
+ raise
+
+
async def run_chat_planning_agent(user_input: str, logger) -> str:
"""
Run the chat-based planning agent for user-provided coding requirements.
@@ -1562,3 +1617,69 @@
except Exception as e:
print(f"Error in execute_chat_based_planning_pipeline: {e}")
raise e
+
+
+async def execute_requirement_analysis_workflow(
+ user_input: str,
+ analysis_mode: str,
+ user_answers: Dict[str, str] = None,
+ logger=None,
+ progress_callback: Optional[Callable] = None
+) -> Dict[str, Any]:
+ """
+ Execute user requirement analysis workflow
+
+ This function supports two modes:
+ 1. generate_questions: Generate guiding questions based on user initial requirements
+ 2. summarize_requirements: Generate detailed requirement document based on user answers
+
+ Args:
+ user_input: User's initial requirement description
+ analysis_mode: Analysis mode ("generate_questions" or "summarize_requirements")
+ user_answers: User's answer dictionary for questions
+ logger: Logger instance
+ progress_callback: Progress callback function
+
+ Returns:
+ Dict[str, Any]: Dictionary containing analysis results
+ """
+ try:
+ print(f"🧠 Starting requirement analysis workflow, mode: {analysis_mode}")
+
+ if progress_callback:
+ if analysis_mode == "generate_questions":
+ progress_callback(10, "🤔 Analyzing user requirements, generating guiding questions...")
+ else:
+ progress_callback(10, "📝 Integrating user answers, generating detailed requirement document...")
+
+ # Call requirement analysis Agent
+ result = await run_requirement_analysis_agent(
+ user_input=user_input,
+ analysis_mode=analysis_mode,
+ user_answers=user_answers,
+ logger=logger
+ )
+
+ if progress_callback:
+ progress_callback(100, "✅ Requirement analysis completed!")
+
+ return {
+ "status": "success",
+ "mode": analysis_mode,
+ "result": result,
+ "message": f"Requirement analysis ({analysis_mode}) executed successfully"
+ }
+
+ except Exception as e:
+ error_msg = f"Requirement analysis workflow execution failed: {str(e)}"
+ print(f"❌ {error_msg}")
+
+ if progress_callback:
+ progress_callback(0, f"❌ {error_msg}")
+
+ return {
+ "status": "error",
+ "mode": analysis_mode,
+ "error": error_msg,
+ "message": "Requirement analysis workflow execution failed"
+ }
\ No newline at end of file
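A rough usage sketch for the workflow entry point added above; the call shape and return keys are taken from the diff, while the example inputs and progress printing are illustrative.

```python
import asyncio
from workflows.agent_orchestration_engine import execute_requirement_analysis_workflow

async def demo() -> None:
    idea = "A web app for sentiment analysis of social media posts"

    # Step 1: generate guiding questions from the initial idea.
    questions = await execute_requirement_analysis_workflow(
        user_input=idea,
        analysis_mode="generate_questions",
        progress_callback=lambda pct, msg: print(f"[{pct:3d}%] {msg}"),
    )
    print(questions["status"], questions["result"][:200])

    # Step 2: summarize detailed requirements from the user's answers.
    summary = await execute_requirement_analysis_workflow(
        user_input=idea,
        analysis_mode="summarize_requirements",
        user_answers={"0": "X/Twitter only", "1": "Streamlit + Hugging Face"},
    )
    print(summary["result"][:300])

if __name__ == "__main__":
    asyncio.run(demo())
```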
diff --git a/workflows/agents/requirement_analysis_agent.py b/workflows/agents/requirement_analysis_agent.py
new file mode 100644
index 00000000..1bc5b0a7
--- /dev/null
+++ b/workflows/agents/requirement_analysis_agent.py
@@ -0,0 +1,408 @@
+"""
+User Requirement Analysis Agent
+
+Responsible for analyzing user initial requirements, generating guiding questions,
+and summarizing detailed requirement documents based on user responses.
+This Agent seamlessly integrates with existing chat workflows to provide more precise requirement understanding.
+"""
+
+import json
+import logging
+from typing import Dict, Any, List, Optional
+
+from mcp_agent.agents.agent import Agent
+from utils.llm_utils import get_preferred_llm_class
+
+
+class RequirementAnalysisAgent:
+ """
+ User Requirement Analysis Agent
+
+ Core Functions:
+ 1. Generate 1-3 guiding questions based on the user's initial requirements
+ 2. Collect user responses and analyze requirement completeness
+ 3. Generate detailed requirement documents for subsequent workflows
+ 4. Support skipping questions to directly enter implementation process
+
+ Design Philosophy:
+ - Intelligent question generation covering functionality, technology, performance, UI, deployment dimensions
+ - Flexible user interaction supporting partial answers or complete skipping
+ - Structured requirement output for easy understanding by code generation agents
+ """
+
+ def __init__(self, logger: Optional[logging.Logger] = None):
+ """
+ Initialize requirement analysis agent
+ Args:
+ logger: Logger instance
+ """
+ self.logger = logger or self._create_default_logger()
+ self.mcp_agent = None
+ self.llm = None
+
+ def _create_default_logger(self) -> logging.Logger:
+ """Create default logger"""
+ logger = logging.getLogger(f"{__name__}.RequirementAnalysisAgent")
+ logger.setLevel(logging.INFO)
+ return logger
+
+ async def __aenter__(self):
+ """Async context manager entry"""
+ await self.initialize()
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ """Async context manager exit"""
+ await self.cleanup()
+
+ async def initialize(self):
+ """Initialize MCP Agent connection and LLM"""
+ try:
+ self.mcp_agent = Agent(
+ name="RequirementAnalysisAgent",
+ instruction="""You are a professional requirement analysis expert, skilled at guiding users to provide more detailed project requirements through precise questions.
+
+Your core capabilities:
+1. **Intelligent Question Generation**: Based on the user's initial description, generate 1-3 key questions covering functional requirements, technology selection, performance requirements, user interface, deployment environment, etc.
+2. **Requirement Understanding Analysis**: Deep analysis of user's real intentions and implicit requirements
+3. **Structured Requirement Output**: Integrate scattered requirement information into clear technical specification documents
+
+Question Generation Principles:
+- Questions should be specific and clear, avoiding overly broad scope
+- Cover key decision points for technical implementation
+- Consider project feasibility and complexity
+- Help users think about important details they might have missed
+
+Requirement Summary Principles:
+- Maintain user's original intent unchanged
+- Supplement key information for technical implementation
+- Provide clear functional module division
+- Give reasonable technical architecture suggestions""",
+ server_names=[], # No MCP servers needed, only use LLM
+ )
+
+ # Initialize agent context
+ await self.mcp_agent.__aenter__()
+
+ # Attach LLM
+ self.llm = await self.mcp_agent.attach_llm(get_preferred_llm_class())
+
+ self.logger.info("RequirementAnalysisAgent initialized successfully")
+
+ except Exception as e:
+ self.logger.error(f"RequirementAnalysisAgent initialization failed: {e}")
+ raise
+
+ async def cleanup(self):
+ """Clean up resources"""
+ if self.mcp_agent:
+ try:
+ await self.mcp_agent.__aexit__(None, None, None)
+ except Exception as e:
+ self.logger.warning(f"Error during resource cleanup: {e}")
+
+ async def generate_guiding_questions(self, user_input: str) -> List[Dict[str, str]]:
+ """
+ Generate guiding questions based on user initial requirements
+
+ Args:
+ user_input: User's initial requirement description
+
+ Returns:
+ List[Dict]: Question list, each question contains category, question, importance and other fields
+ """
+ try:
+ self.logger.info("Starting to generate AI precise guiding questions")
+
+ # Build more precise prompt
+ prompt = f"""Based on user's project requirements, generate precise guiding questions to help refine requirements.
+
+User Requirements: {user_input}
+
+Please analyze the user's requirements and generate the 1-3 most critical questions, focusing on the most important aspects of this specific project.
+
+Return format (pure JSON array, no other text):
+[
+ {{
+ "category": "Functional Requirements",
+ "question": "Specific question content",
+ "importance": "High",
+ "hint": "Question hint"
+ }}
+]
+
+Requirements: Questions should be specific and practical, avoiding general discussions."""
+
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
+
+ params = RequestParams(
+ max_tokens=3000,
+ temperature=0.5 # Lower temperature for more stable JSON output
+ )
+
+ self.logger.info(f"Calling LLM to generate precise questions, input length: {len(user_input)}")
+
+ result = await self.llm.generate_str(
+ message=prompt,
+ request_params=params
+ )
+
+ self.logger.info(f"LLM returned result length: {len(result) if result else 0}")
+
+ if not result or not result.strip():
+ self.logger.error("LLM returned empty result")
+ raise ValueError("LLM returned empty result")
+
+ self.logger.info(f"LLM returned result: {result[:500]}...")
+
+ # Clean result and extract JSON part
+ result_cleaned = result.strip()
+
+ # Try to find JSON array
+ import re
+ json_pattern = r'\[\s*\{.*?\}\s*\]'
+ json_match = re.search(json_pattern, result_cleaned, re.DOTALL)
+
+ if json_match:
+ json_str = json_match.group()
+ self.logger.info(f"Extracted JSON: {json_str[:200]}...")
+ else:
+ # If complete JSON not found, try direct parsing
+ json_str = result_cleaned
+
+ # Parse JSON result
+ try:
+ questions = json.loads(json_str)
+ if isinstance(questions, list) and len(questions) > 0:
+ self.logger.info(f"✅ Successfully generated {len(questions)} AI precise guiding questions")
+ return questions
+ else:
+ raise ValueError("Returned result is not a valid question list")
+
+ except json.JSONDecodeError as e:
+ self.logger.error(f"JSON parsing failed: {e}")
+ self.logger.error(f"Original result: {result}")
+
+ # Try more lenient JSON extraction
+ lines = result.split('\n')
+ json_lines = []
+ in_json = False
+
+ for line in lines:
+ if '[' in line:
+ in_json = True
+ if in_json:
+ json_lines.append(line)
+ if ']' in line and in_json:
+ break
+
+ if json_lines:
+ try:
+ json_attempt = '\n'.join(json_lines)
+ questions = json.loads(json_attempt)
+ if isinstance(questions, list) and len(questions) > 0:
+ self.logger.info(f"✅ Generated {len(questions)} questions through lenient parsing")
+ return questions
+ except Exception:
+ pass
+
+ # If JSON parsing fails, raise an error
+ self.logger.error("JSON parsing completely failed")
+ raise ValueError("Failed to parse AI generated questions")
+
+ except Exception as e:
+ self.logger.error(f"Failed to generate guiding questions: {e}")
+ # Re-raise the exception instead of falling back to default questions
+ raise
+
+ async def summarize_detailed_requirements(self,
+ initial_input: str,
+ answers: Dict[str, str]) -> str:
+ """
+ Generate detailed requirement document based on initial input and user answers
+
+ Args:
+ initial_input: User's initial requirement description
+ answers: User's answer dictionary {question_id: answer}
+
+ Returns:
+ str: Detailed requirement document
+ """
+ try:
+ self.logger.info("Starting to generate AI detailed requirement summary")
+
+ # Build answer content
+ answers_text = ""
+ if answers:
+ for question_id, answer in answers.items():
+ if answer and answer.strip():
+ answers_text += f"• {answer}\n"
+
+ if not answers_text:
+ answers_text = "User chose to skip questions, generating based on initial requirements"
+
+ prompt = f"""Based on user requirements and responses, generate a concise project requirement document.
+
+Initial Requirements: {initial_input}
+
+Additional Information:
+{answers_text}
+
+Please generate a focused requirement document including:
+
+## Project Overview
+Brief description of project's core goals and value proposition
+
+## Functional Requirements
+Detailed list of required features and functional modules:
+- Core functionalities
+- User interactions and workflows
+- Data processing requirements
+- Integration needs
+
+## Technical Architecture
+Recommended technical design including:
+- Technology stack and frameworks
+- System architecture design
+- Database and data storage solutions
+- API design considerations
+- Security requirements
+
+## Performance & Scalability
+- Expected user scale and performance requirements
+- Scalability considerations and constraints
+
+Requirements: Focus on what needs to be built and how to build it technically. Be concise but comprehensive - avoid unnecessary implementation details."""
+
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
+
+ params = RequestParams(
+ max_tokens=4000,
+ temperature=0.3
+ )
+
+ self.logger.info(f"Calling LLM to generate requirement summary, initial requirement length: {len(initial_input)}")
+
+ result = await self.llm.generate_str(
+ message=prompt,
+ request_params=params
+ )
+
+ if not result or not result.strip():
+ self.logger.error("LLM returned empty requirement summary")
+ raise ValueError("LLM returned empty requirement summary")
+
+ self.logger.info(f"✅ Requirement summary generation completed, length: {len(result)}")
+ return result.strip()
+
+ except Exception as e:
+ self.logger.error(f"Requirement summary failed: {e}")
+ # Return basic requirement document
+ return f"""## Project Overview
+Based on user requirements: {initial_input}
+
+## Functional Requirements
+Core functionality needed: {initial_input}
+
+## Technical Architecture
+- Select appropriate technology stack based on project requirements
+- Adopt modular architecture design
+- Consider database and data storage solutions
+- Implement necessary security measures
+
+## Performance & Scalability
+- Design for expected user scale
+- Consider scalability and performance requirements
+
+Note: Automatic requirement analysis failed, so this is a simplified requirement document. Please supplement the details manually."""
+
+ async def modify_requirements(self,
+ current_requirements: str,
+ modification_feedback: str) -> str:
+ """
+ Modify existing requirement document based on user feedback
+
+ Args:
+ current_requirements: Current requirement document content
+ modification_feedback: User's modification requests and feedback
+
+ Returns:
+ str: Modified requirement document
+ """
+ try:
+ self.logger.info("Starting to modify requirements based on user feedback")
+
+ # Build modification prompt
+ prompt = f"""Based on the current requirement document and user's modification requests, generate an updated requirement document.
+
+Current Requirements Document:
+{current_requirements}
+
+User's Modification Requests:
+{modification_feedback}
+
+CRITICAL REQUIREMENT: You MUST generate a complete, well-structured requirement document regardless of how complete or incomplete the user's modification requests are. Even if the user only provides minimal or unclear feedback, you must still produce a comprehensive requirement document following the exact format below.
+
+Generate an updated requirement document that incorporates any reasonable interpretation of the user's requested changes while maintaining the EXACT structure and format:
+
+## Project Overview
+Brief description of project's core goals and value proposition
+
+## Functional Requirements
+Detailed list of required features and functional modules:
+- Core functionalities
+- User interactions and workflows
+- Data processing requirements
+- Integration needs
+
+## Technical Architecture
+Recommended technical design including:
+- Technology stack and frameworks
+- System architecture design
+- Database and data storage solutions
+- API design considerations
+- Security requirements
+
+## Performance & Scalability
+- Expected user scale and performance requirements
+- Scalability considerations and constraints
+
+MANDATORY REQUIREMENTS:
+1. ALWAYS return a complete document with ALL sections above, regardless of user input completeness
+2. If user feedback is unclear or incomplete, make reasonable assumptions based on the current requirements
+3. Incorporate any clear user requests while filling in missing details intelligently
+4. Maintain consistency and coherence throughout the document
+5. Ensure all technical suggestions are feasible and practical
+6. NEVER return an incomplete or partial document - always provide full sections
+7. Keep the same professional structure and format in all cases"""
+
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
+
+ params = RequestParams(
+ max_tokens=4000,
+ temperature=0.3
+ )
+
+ self.logger.info(f"Calling LLM to modify requirements, feedback length: {len(modification_feedback)}")
+
+ result = await self.llm.generate_str(
+ message=prompt,
+ request_params=params
+ )
+
+ if not result or not result.strip():
+ self.logger.error("LLM returned empty modified requirements")
+ raise ValueError("LLM returned empty modified requirements")
+
+ self.logger.info(f"✅ Requirements modification completed, length: {len(result)}")
+ return result.strip()
+
+ except Exception as e:
+ self.logger.error(f"Requirements modification failed: {e}")
+ # Return current requirements with a note about the modification attempt
+ return f"""{current_requirements}
+
+---
+**Note:** Automatic modification failed due to technical issues. The original requirements are shown above. Please manually incorporate the following requested changes:
+
+{modification_feedback}"""