|
1 | | -from typing import Dict, Any, List, Optional |
| 1 | +from typing import Dict, Any, List, Optional, Iterator |
2 | 2 | import logging |
3 | 3 | from packages.memory.services.graph_service import GraphService |
4 | 4 | from packages.memory.llm import BaseLLMProvider |
@@ -104,6 +104,85 @@ def analyze_issue(self, title: str, body: str) -> Dict[str, Any]: |
104 | 104 | "context_used": [self._format_item_summary(item) for item in all_context] |
105 | 105 | } |
106 | 106 |
|
def analyze_issue_stream(self, title: str, body: str) -> Iterator[Dict[str, Any]]:
    """
    Analyze an issue against the knowledge graph and stream the report.

    The generator yields dicts in this order:
      1. ``{"error": ...}`` — and then stops — if no relevant context is
         found in the knowledge graph.
      2. ``{"context_used": [...]}`` — summaries of the graph items consulted.
      3. One ``{"chunk": ...}`` per piece of the LLM-generated report.

    Args:
        title: Issue title
        body: Issue body/description

    Returns:
        Iterator yielding chunks of the report, context info, or an error dict
    """
    issue_text = f"{title}\n\n{body}"
    # Lazy %-args: the message is only formatted when INFO logging is enabled.
    logger.info("Analyzing issue (streaming): %s", title)

    # 1./2. Search the graph for relevant code (Functions), Files, and
    # history (Commits). The specs are iterated in this fixed order so the
    # combined context lists code first, then files, then commits — the
    # same order the non-streaming analyze_issue() produces.
    search_specs = (("Function", 5), ("File", 3), ("Commit", 5))
    all_context: List[Any] = []
    for node_type, limit in search_specs:
        all_context.extend(
            self.graph_service.hybrid_search(
                query_text=issue_text,
                node_type=node_type,
                limit=limit,
            )
        )

    if not all_context:
        yield {"error": "No relevant context found in the knowledge graph."}
        return

    # Yield context info first so callers can render it before report chunks.
    yield {
        "context_used": [self._format_item_summary(item) for item in all_context]
    }

    # 3. Generate the report using the LLM, streaming it chunk by chunk.
    system_prompt = """
    You are an expert software engineer and debugger.
    You are given a GitHub issue description and a set of relevant code snippets and commit history from the project's Knowledge Graph.

    Your task is to analyze the issue and provide a detailed report containing:
    1. **Root Cause Analysis**: What is likely causing the issue based on the code and history?
    2. **Affected Areas**: Which files, classes, or functions are involved?
    3. **Suggested Fix**: How can this be fixed? Provide code snippets if possible.
    4. **Relevant History**: Are there recent commits that might have introduced this?

    Be specific. Reference the filenames and function names provided in the context.
    """

    # Format context for the LLM prompt.
    context_str = self._format_context(all_context)

    prompt = f"""
    ISSUE TITLE: {title}

    ISSUE BODY:
    {body}

    RELEVANT CONTEXT FROM KNOWLEDGE GRAPH:
    {context_str}

    Please provide your analysis report.
    """

    for chunk in self.llm_provider.stream_text(prompt=prompt, system_prompt=system_prompt):
        yield {"chunk": chunk}
107 | 186 | def _format_context(self, items: List[Any]) -> str: |
108 | 187 | output = [] |
109 | 188 | for item in items: |
|
0 commit comments