Skip to content

Commit 928f53e

Browse files
committed
Fix integrated system errors and improve functionality
- Added missing _identify_challenges method to EnhancedComplexShapeGenerator
- Added missing _generate_enhanced_plan method for enhanced plan generation
- Added generate_response method to LLMClient for general text generation
- Added store_state method to StateCache as alias for cache_state
- Fixed JSON serialization issues with enum values in DeepSeek client
- Fixed QualityMetrics constructor to use confidence_level instead of confidence
- Updated predict_overall_quality to handle dict plans from enhanced generation
- Improved error handling and type compatibility across components

These fixes enable the integrated system to successfully generate complex FreeCAD parts using DeepSeek R1 with proper error handling and fallbacks.
1 parent 4226902 commit 928f53e

File tree

4 files changed

+241
-4
lines changed

4 files changed

+241
-4
lines changed

src/ai_designer/core/enhanced_complex_generator.py

Lines changed: 165 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -277,10 +277,34 @@ def predict_step_quality(
277277
return prediction
278278

279279
def predict_overall_quality(
280-
self, plan: List[EnhancedGenerationStep], execution_context: Dict[str, Any]
280+
self,
281+
plan: Union[List[EnhancedGenerationStep], Dict[str, Any]],
282+
execution_context: Dict[str, Any],
281283
) -> QualityMetrics:
282284
"""Predict overall quality for the entire generation plan"""
283285

286+
# Handle different plan formats
287+
if isinstance(plan, dict):
288+
# Extract execution steps from enhanced plan
289+
execution_steps = plan.get("execution_steps", [])
290+
complexity_score = plan.get("complexity_score", 5.0)
291+
292+
# Create simple quality prediction based on plan structure
293+
base_quality = max(0.1, 1.0 - (complexity_score / 10.0))
294+
step_count_factor = max(0.1, 1.0 - (len(execution_steps) / 20.0))
295+
predicted_quality = base_quality * step_count_factor
296+
297+
# Return QualityMetrics object
298+
return QualityMetrics(
299+
overall_score=predicted_quality,
300+
geometric_accuracy=predicted_quality * 0.9,
301+
design_consistency=predicted_quality * 0.95,
302+
aesthetic_quality=predicted_quality * 0.8,
303+
manufacturability=predicted_quality * 0.85,
304+
performance_score=predicted_quality * 0.9,
305+
confidence_level=0.7,
306+
)
307+
284308
step_predictions = [
285309
self.predict_step_quality(step, execution_context) for step in plan
286310
]
@@ -708,6 +732,63 @@ def _analyze_requirements_intelligently(self, requirements: str) -> Dict[str, An
708732
"confidence": 0.8,
709733
}
710734

735+
def _identify_challenges(
736+
self,
737+
requirements: str,
738+
entities: List[Dict[str, Any]],
739+
relationships: List[Dict[str, Any]],
740+
) -> List[Dict[str, Any]]:
741+
"""Identify potential challenges in the design requirements"""
742+
challenges = []
743+
744+
# Check for geometric complexity
745+
if len(entities) > 5:
746+
challenges.append(
747+
{
748+
"type": "geometric_complexity",
749+
"severity": "medium",
750+
"description": f"Design involves {len(entities)} entities which may require careful coordination",
751+
"suggestion": "Consider breaking down into sub-assemblies",
752+
}
753+
)
754+
755+
# Check for precision requirements
756+
precision_keywords = ["precise", "accurate", "tolerance", "fit", "clearance"]
757+
if any(keyword in requirements.lower() for keyword in precision_keywords):
758+
challenges.append(
759+
{
760+
"type": "precision_requirements",
761+
"severity": "high",
762+
"description": "Design requires high precision which may affect manufacturability",
763+
"suggestion": "Define clear tolerances and consider manufacturing constraints",
764+
}
765+
)
766+
767+
# Check for complex relationships
768+
if len(relationships) > len(entities):
769+
challenges.append(
770+
{
771+
"type": "complex_relationships",
772+
"severity": "medium",
773+
"description": "Multiple relationships between entities may complicate assembly",
774+
"suggestion": "Create detailed assembly sequence and constraints",
775+
}
776+
)
777+
778+
# Check for material considerations
779+
material_keywords = ["steel", "aluminum", "plastic", "composite", "material"]
780+
if any(keyword in requirements.lower() for keyword in material_keywords):
781+
challenges.append(
782+
{
783+
"type": "material_considerations",
784+
"severity": "low",
785+
"description": "Specific materials mentioned may affect design decisions",
786+
"suggestion": "Consider material properties in design validation",
787+
}
788+
)
789+
790+
return challenges
791+
711792
def _estimate_resources(
712793
self, complexity_score: float, entity_count: int
713794
) -> Dict[str, Any]:
@@ -862,6 +943,88 @@ def _select_optimal_mode(
862943
else:
863944
return GenerationMode.ADAPTIVE
864945

946+
def _generate_enhanced_plan(
947+
self, requirements: str, analysis: Dict[str, Any], patterns: List[Dict] = None
948+
) -> Dict[str, Any]:
949+
"""Generate an enhanced execution plan based on requirements and analysis"""
950+
951+
entities = analysis.get("entities", [])
952+
relationships = analysis.get("relationships", [])
953+
complexity_score = analysis.get("complexity_score", 3.0)
954+
challenges = analysis.get("challenges", [])
955+
956+
# Create execution steps based on entities and relationships
957+
execution_steps = []
958+
959+
# Step 1: Initialize document
960+
execution_steps.append(
961+
{
962+
"step_id": 1,
963+
"type": "initialization",
964+
"description": "Initialize FreeCAD document and import modules",
965+
"code_template": "doc = App.newDocument('{}')".format("GearAssembly"),
966+
"estimated_time": 2,
967+
}
968+
)
969+
970+
# Step 2: Create primary entities
971+
step_id = 2
972+
for entity in entities:
973+
execution_steps.append(
974+
{
975+
"step_id": step_id,
976+
"type": "entity_creation",
977+
"description": f"Create {entity['name']} ({entity['type']})",
978+
"entity": entity,
979+
"estimated_time": entity["complexity"] * 2,
980+
}
981+
)
982+
step_id += 1
983+
984+
# Step 3: Apply relationships
985+
for relationship in relationships:
986+
execution_steps.append(
987+
{
988+
"step_id": step_id,
989+
"type": "relationship",
990+
"description": f"Apply {relationship['type']} relationship",
991+
"relationship": relationship,
992+
"estimated_time": relationship["complexity"] * 3,
993+
}
994+
)
995+
step_id += 1
996+
997+
# Step 4: Finalization
998+
execution_steps.append(
999+
{
1000+
"step_id": step_id,
1001+
"type": "finalization",
1002+
"description": "Finalize document and recompute",
1003+
"code_template": "doc.recompute()",
1004+
"estimated_time": 1,
1005+
}
1006+
)
1007+
1008+
# Create enhanced plan
1009+
enhanced_plan = {
1010+
"requirements": requirements,
1011+
"analysis": analysis,
1012+
"execution_steps": execution_steps,
1013+
"estimated_total_time": sum(
1014+
step.get("estimated_time", 2) for step in execution_steps
1015+
),
1016+
"complexity_score": complexity_score,
1017+
"challenges": challenges,
1018+
"patterns_used": len(patterns) if patterns else 0,
1019+
"optimization_suggestions": [
1020+
"Use parametric modeling for flexibility",
1021+
"Consider manufacturing constraints",
1022+
"Validate geometric relationships",
1023+
],
1024+
}
1025+
1026+
return enhanced_plan
1027+
8651028
def _send_generation_complete(self, session_id: str, result: GenerationResult):
8661029
"""Send generation completion notification via WebSocket"""
8671030
if self.websocket_manager:
@@ -1026,7 +1189,7 @@ def _generate_with_deepseek_r1(
10261189
# Prepare enhanced context for DeepSeek
10271190
deepseek_context = {
10281191
"session_id": session_context.get("session_id"),
1029-
"generation_mode": session_context.get("mode", "adaptive"),
1192+
"generation_mode": str(session_context.get("mode", "adaptive")),
10301193
"quality_targets": session_context.get("quality_targets", {}),
10311194
"freecad_state": session_context.get("context", {}),
10321195
"complexity_analysis": complexity_analysis,

src/ai_designer/llm/client.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,3 +163,41 @@ def generate_command(self, nl_command, state=None):
163163
except Exception as e:
164164
print(f"[LLMClient] Error generating command: {e}")
165165
raise
166+
167+
def generate_response(self, prompt: str, context: Optional[str] = None) -> str:
    """
    Generate a free-form text response from the underlying LLM.

    Args:
        prompt: The text prompt to generate a response for
        context: Optional context prepended to the prompt

    Returns:
        Generated text response, stripped of surrounding whitespace

    Raises:
        Exception: re-raises any error from the underlying LLM call
    """
    full_prompt = f"Context: {context}\n\nPrompt: {prompt}" if context else prompt

    try:
        # Pass the raw string straight to the model. The previous
        # ChatPromptTemplate.from_messages round-trip treated '{...}' in the
        # prompt as template variables, so any prompt containing JSON or
        # code braces raised a KeyError during format_messages().
        response = self.llm.invoke(full_prompt)

        # Normalize the different response shapes LangChain-style models
        # return: .content attribute, .text() method, .text attribute,
        # or a plain object convertible with str().
        if hasattr(response, "content"):
            return response.content.strip()
        text_attr = getattr(response, "text", None)
        if callable(text_attr):
            return text_attr().strip()
        if text_attr is not None:
            return text_attr.strip()
        return str(response).strip()

    except Exception as e:
        logger.error(f"Error generating response: {e}")
        raise

src/ai_designer/llm/deepseek_client.py

Lines changed: 28 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -236,10 +236,36 @@ def _build_complex_part_prompt(
236236
)
237237

238238
if context:
239-
base_prompt += f"\n\nCONTEXT:\n{json.dumps(context, indent=2)}"
239+
try:
240+
# Convert any enum values to strings for JSON serialization
241+
serializable_context = {}
242+
for key, value in context.items():
243+
if hasattr(value, "__dict__"):
244+
serializable_context[key] = str(value)
245+
elif isinstance(value, Enum):
246+
serializable_context[key] = value.value
247+
else:
248+
serializable_context[key] = value
249+
base_prompt += (
250+
f"\n\nCONTEXT:\n{json.dumps(serializable_context, indent=2)}"
251+
)
252+
except (TypeError, ValueError) as e:
253+
base_prompt += f"\n\nCONTEXT:\n{str(context)}"
240254

241255
if constraints:
242-
base_prompt += f"\n\nCONSTRAINTS:\n{json.dumps(constraints, indent=2)}"
256+
try:
257+
# Convert any enum values to strings for JSON serialization
258+
serializable_constraints = {}
259+
for key, value in constraints.items():
260+
if hasattr(value, "__dict__"):
261+
serializable_constraints[key] = str(value)
262+
elif isinstance(value, Enum):
263+
serializable_constraints[key] = value.value
264+
else:
265+
serializable_constraints[key] = value
266+
base_prompt += f"\n\nCONSTRAINTS:\n{json.dumps(serializable_constraints, indent=2)}"
267+
except (TypeError, ValueError) as e:
268+
base_prompt += f"\n\nCONSTRAINTS:\n{str(constraints)}"
243269

244270
base_prompt += """
245271

src/ai_designer/redis_utils/state_cache.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -278,3 +278,13 @@ def _extract_document_names(self, keys: List[str]) -> List[str]:
278278
def update_state(self, state_data: Dict[str, Any], state_key: Optional[str] = None) -> str:
    """Update state data in Redis (alias for cache_state).

    Args:
        state_data: The state payload to store.
        state_key: Optional explicit key; when None, cache_state is expected
            to pick one (presumably auto-generated — confirm in cache_state).

    Returns:
        The key under which the state was cached, as returned by cache_state.
    """
    # Implicit-Optional annotation (`state_key: str = None`) fixed per PEP 484.
    return self.cache_state(state_data, state_key)
281+
282+
def store_state(
    self, state_key: str, state_data: Dict[str, Any], expiration: Optional[int] = None
) -> bool:
    """Store state data in Redis (alias for cache_state).

    Note the argument order differs from cache_state: here the key comes
    first, matching a store(key, value) calling convention.

    Args:
        state_key: Key to store the state under.
        state_data: The state payload to store.
        expiration: Optional TTL forwarded to cache_state (units defined by
            cache_state — presumably seconds; confirm there).

    Returns:
        True on success, False if caching raised any exception. The broad
        catch is deliberate: this is best-effort advisory caching and
        callers treat False as a soft failure.
    """
    # Implicit-Optional annotation (`expiration: int = None`) fixed per PEP 484.
    try:
        self.cache_state(state_data, state_key, expiration=expiration)
        return True
    except Exception:
        return False

0 commit comments

Comments
 (0)