Skip to content

Commit cbbc025

Browse files
dpage and claude committed
Address remaining CodeRabbit review feedback for streaming and rendering.
- Use distinct 3-tuple ('complete', text, messages) for completion events to avoid ambiguity with ('tool_use', [...]) 2-tuples in chat streaming. - Pass conversation history from request into chat_with_database_stream() so follow-up NLQ turns retain context. - Add re.IGNORECASE to SQL fence regex for case-insensitive matching. - Render MarkdownContent as block element instead of span to avoid invalid DOM when response contains paragraphs, lists, or tables. - Keep stop notice as a separate message instead of appending to partial markdown, preventing it from being swallowed by open code fences. - Snapshot streamingIdRef before setMessages in error handler to avoid race condition where ref is cleared before React executes the updater. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 288ac98 commit cbbc025

4 files changed

Lines changed: 48 additions & 21 deletions

File tree

web/pgadmin/llm/chat.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -174,8 +174,8 @@ def chat_with_database_stream(
174174
Yields:
175175
str: Text content chunks from the final LLM response.
176176
177-
The last item yielded is a tuple of
178-
(final_response_text, updated_conversation_history).
177+
The last item yielded is a 3-tuple of
178+
('complete', final_response_text, updated_conversation_history).
179179
180180
Raises:
181181
LLMClientError: If the LLM request fails.
@@ -220,8 +220,9 @@ def chat_with_database_stream(
220220
messages.append(response.to_message())
221221

222222
if response.stop_reason != StopReason.TOOL_USE:
223-
# Final response - yield the completion tuple
224-
yield (response.content, messages)
223+
# Final response - yield a 3-tuple to distinguish from
224+
# the 2-tuple tool_use event
225+
yield ('complete', response.content, messages)
225226
return
226227

227228
# Signal that tools are being executed so the caller can

web/pgadmin/tools/sqleditor/__init__.py

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2875,6 +2875,7 @@ def nlq_chat_stream(trans_id):
28752875
data = request.get_json(silent=True) or {}
28762876
user_message = data.get('message', '').strip()
28772877
conversation_id = data.get('conversation_id')
2878+
history_data = data.get('history', [])
28782879

28792880
if not user_message:
28802881
return make_json_response(
@@ -2885,6 +2886,10 @@ def nlq_chat_stream(trans_id):
28852886
def generate():
28862887
"""Generator for SSE events."""
28872888
import secrets as py_secrets
2889+
from pgadmin.llm.compaction import (
2890+
deserialize_history, compact_history
2891+
)
2892+
from pgadmin.llm.utils import get_default_provider
28882893

28892894
try:
28902895
# Send thinking status
@@ -2893,13 +2898,24 @@ def generate():
28932898
'message': gettext('Analyzing your request...')
28942899
})
28952900

2901+
# Deserialize and compact conversation history
2902+
conversation_history = None
2903+
if history_data:
2904+
conversation_history = deserialize_history(history_data)
2905+
provider = get_default_provider() or 'openai'
2906+
conversation_history = compact_history(
2907+
conversation_history,
2908+
provider=provider
2909+
)
2910+
28962911
# Stream the LLM response with database tools
28972912
response_text = ''
28982913
for item in chat_with_database_stream(
28992914
user_message=user_message,
29002915
sid=trans_obj.sid,
29012916
did=trans_obj.did,
2902-
system_prompt=NLQ_SYSTEM_PROMPT
2917+
system_prompt=NLQ_SYSTEM_PROMPT,
2918+
conversation_history=conversation_history
29032919
):
29042920
if isinstance(item, str):
29052921
# Text chunk from streaming LLM response
@@ -2916,15 +2932,16 @@ def generate():
29162932
'Querying the database...'
29172933
)
29182934
})
2919-
elif isinstance(item, tuple):
2920-
# Final result: (response_text, messages)
2921-
response_text = item[0]
2935+
elif isinstance(item, tuple) and \
2936+
item[0] == 'complete':
2937+
# Final result: ('complete', response_text, messages)
2938+
response_text = item[1]
29222939

29232940
# Extract SQL from markdown code fences
29242941
sql_blocks = re.findall(
29252942
r'```(?:sql|pgsql|postgresql)\s*\n(.*?)```',
29262943
response_text,
2927-
re.DOTALL
2944+
re.DOTALL | re.IGNORECASE
29282945
)
29292946
sql = ';\n\n'.join(
29302947
block.strip().rstrip(';') for block in sql_blocks

web/pgadmin/tools/sqleditor/static/js/components/sections/NLQChatPanel.jsx

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -550,9 +550,8 @@ function ChatMessage({ message, onInsertSQL, onReplaceSQL, textColors, cmKey, fo
550550
const content = seg.content?.trim();
551551
if (!content && !cursor) return null;
552552
return (
553-
<Box key={idx} sx={{ marginTop: idx > 0 ? 1 : 0, display: 'inline' }}>
553+
<Box key={idx} sx={{ marginTop: idx > 0 ? 1 : 0 }}>
554554
<MarkdownContent
555-
component="span"
556555
dangerouslySetInnerHTML={{ __html: renderMarkdownText(content || '') }}
557556
/>
558557
{cursor}
@@ -882,13 +881,18 @@ export function NLQChatPanel() {
882881
// Check if user manually stopped
883882
if (stoppedRef.current) {
884883
const streamId = streamingIdRef.current;
885-
// If we have partial streaming content, show it as-is
884+
// If we have partial streaming content, show it separately
885+
// from the stop notice to avoid breaking open markdown fences
886886
if (streamingTextRef.current) {
887887
setMessages((prev) => [
888888
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
889889
{
890890
type: MESSAGE_TYPES.ASSISTANT,
891-
content: streamingTextRef.current + '\n\n' + gettext('(Generation stopped)'),
891+
content: streamingTextRef.current,
892+
},
893+
{
894+
type: MESSAGE_TYPES.ASSISTANT,
895+
content: gettext('Generation stopped.'),
892896
},
893897
]);
894898
} else {
@@ -912,13 +916,17 @@ export function NLQChatPanel() {
912916
if (error.name === 'AbortError') {
913917
// Check if this was a user-initiated stop or a timeout
914918
if (stoppedRef.current) {
915-
// User manually stopped - show partial content if any
919+
// User manually stopped - show partial content separately
916920
if (streamingTextRef.current) {
917921
setMessages((prev) => [
918922
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
919923
{
920924
type: MESSAGE_TYPES.ASSISTANT,
921-
content: streamingTextRef.current + '\n\n' + gettext('(Generation stopped)'),
925+
content: streamingTextRef.current,
926+
},
927+
{
928+
type: MESSAGE_TYPES.ASSISTANT,
929+
content: gettext('Generation stopped.'),
922930
},
923931
]);
924932
} else {
@@ -1043,9 +1051,10 @@ export function NLQChatPanel() {
10431051
break;
10441052
}
10451053

1046-
case 'error':
1054+
case 'error': {
1055+
const streamId = streamingIdRef.current;
10471056
setMessages((prev) => [
1048-
...prev.filter((m) => m.id !== thinkingId && m.id !== streamingIdRef.current),
1057+
...prev.filter((m) => m.id !== thinkingId && m.id !== streamId),
10491058
{
10501059
type: MESSAGE_TYPES.ERROR,
10511060
content: event.message,

web/pgadmin/tools/sqleditor/tests/test_nlq_chat.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def runTest(self):
9898
if hasattr(self, 'mock_response'):
9999
def mock_stream_gen(*args, **kwargs):
100100
yield self.mock_response
101-
yield (self.mock_response, [])
101+
yield ('complete', self.mock_response, [])
102102
mock_chat = patch(
103103
'pgadmin.llm.chat.chat_with_database_stream',
104104
side_effect=mock_stream_gen
@@ -254,7 +254,7 @@ def runTest(self):
254254
sql_blocks = re.findall(
255255
r'```(?:sql|pgsql|postgresql)\s*\n(.*?)```',
256256
response_text,
257-
re.DOTALL
257+
re.DOTALL | re.IGNORECASE
258258
)
259259
sql = ';\n\n'.join(
260260
block.strip() for block in sql_blocks
@@ -328,8 +328,8 @@ def mock_stream_gen(*args, **kwargs):
328328
for chunk in [self.mock_response[i:i + 10]
329329
for i in range(0, len(self.mock_response), 10)]:
330330
yield chunk
331-
# Yield final tuple
332-
yield (self.mock_response, [])
331+
# Yield final 3-tuple
332+
yield ('complete', self.mock_response, [])
333333

334334
mock_chat = patch(
335335
'pgadmin.llm.chat.chat_with_database_stream',

0 commit comments

Comments (0)