Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 22 additions & 4 deletions apps/client/src/services/llm_chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,21 +13,38 @@ export async function getAvailableModels(): Promise<LlmModelInfo[]> {
/**
 * Callbacks invoked while streaming a chat completion over Server-Sent Events.
 * Required callbacks fire on every stream; optional ones fire only when the
 * corresponding event type arrives.
 */
export interface StreamCallbacks {
    /** Receives each chunk of assistant response text as it streams in. */
    onChunk: (text: string) => void;
    /** Receives incremental "thinking"/reasoning text, when the model emits it. */
    onThinking?: (text: string) => void;
    /**
     * Fires when the model requests a tool invocation.
     * `requiresApproval` marks tool calls that must be confirmed by the user
     * before execution (see `executeToolCall`).
     */
    onToolUse?: (toolName: string, input: Record<string, unknown>, requiresApproval?: boolean) => void;
    /** Receives the result of a tool invocation; `isError` marks failures. */
    onToolResult?: (toolName: string, result: string, isError?: boolean) => void;
    /** Receives each citation attached to the response. */
    onCitation?: (citation: LlmCitation) => void;
    /** Receives token usage statistics for the completion. */
    onUsage?: (usage: LlmUsage) => void;
    /** Fires when the stream fails with an error message. */
    onError: (error: string) => void;
    /** Fires exactly once when the stream completes. */
    onDone: () => void;
}

/**
 * Execute a mutating tool call after user approval.
 *
 * @param toolName name of the tool the model requested
 * @param toolInput arguments the model supplied for the tool
 * @returns the tool output serialized to a string; `isError` is set when the
 *          server reported a failure
 */
export async function executeToolCall(toolName: string, toolInput: Record<string, unknown>): Promise<{ result: string; isError?: boolean }> {
    // `result` may be a string, a structured payload, or absent, so type it
    // as `unknown` and narrow below (the previous `object` type made the
    // string check below contradictory).
    const response = await server.post<{ result?: unknown; error?: string }>("llm-chat/execute-tool", { toolName, toolInput });

    if (response.error) {
        return { result: response.error, isError: true };
    }

    const result = response.result;

    // JSON.stringify(undefined) returns undefined (not a string), which would
    // violate the declared return type — fall back to "null" for a missing result.
    return {
        result: typeof result === "string" ? result : JSON.stringify(result ?? null)
    };
}

/**
* Stream a chat completion from the LLM API using Server-Sent Events.
* Returns an AbortController that can be used to cancel the stream.
*/
export async function streamChatCompletion(
messages: LlmMessage[],
config: LlmChatConfig,
callbacks: StreamCallbacks
callbacks: StreamCallbacks,
abortSignal?: AbortSignal
): Promise<void> {
const headers = await server.getHeaders();

Expand All @@ -37,7 +54,8 @@ export async function streamChatCompletion(
...headers,
"Content-Type": "application/json"
} as HeadersInit,
body: JSON.stringify({ messages, config })
body: JSON.stringify({ messages, config }),
signal: abortSignal
});

if (!response.ok) {
Expand Down Expand Up @@ -76,7 +94,7 @@ export async function streamChatCompletion(
callbacks.onThinking?.(data.content);
break;
case "tool_use":
callbacks.onToolUse?.(data.toolName, data.toolInput);
callbacks.onToolUse?.(data.toolName, data.toolInput, data.requiresApproval);
// Yield to force Preact to commit the pending tool call
// state before we process the result.
await new Promise((r) => setTimeout(r, 1));
Expand Down
6 changes: 5 additions & 1 deletion apps/client/src/services/note_autocomplete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,8 @@ export interface Options {
hideAllButtons?: boolean;
/** If set, enables command palette mode */
isCommandPalette?: boolean;
/** If true, close the dropdown when the input loses focus (even when using a container). Defaults to false. */
closeOnBlur?: boolean;
}

async function autocompleteSourceForCKEditor(queryText: string) {
Expand Down Expand Up @@ -258,7 +260,9 @@ function initNoteAutocomplete($el: JQuery<HTMLElement>, options?: Options) {
let autocompleteOptions: AutoCompleteConfig = {};
if (options.container) {
autocompleteOptions.dropdownMenuContainer = options.container;
autocompleteOptions.debug = true; // don't close on blur
if (!options.closeOnBlur) {
autocompleteOptions.debug = true; // don't close on blur
}
}

if (options.allowJumpToSearchNotes) {
Expand Down
30 changes: 28 additions & 2 deletions apps/client/src/translations/en/translation.json
Original file line number Diff line number Diff line change
Expand Up @@ -1641,6 +1641,10 @@
"sources": "Sources",
"sources_summary": "{{count}} sources from {{sites}} sites",
"extended_thinking": "Extended thinking",
"knowledge_base": "Knowledge base",
"knowledge_base_sources": "Knowledge base sources",
"knowledge_base_add": "Add a note as source...",
"knowledge_base_remove": "Remove source",
"legacy_models": "Legacy models",
"thinking": "Thinking...",
"thought_process": "Thought process",
Expand All @@ -1649,6 +1653,11 @@
"result": "Result",
"error": "Error",
"tool_error": "failed",
"approve": "Approve",
"reject": "Reject",
"pending_approval": "This action requires your approval",
"rejected_by_user": "Rejected by user",
"stop": "Stop",
"total_tokens": "{{total}} tokens",
"tokens_detail": "{{prompt}} prompt + {{completion}} completion",
"tokens_used": "{{prompt}} prompt + {{completion}} completion = {{total}} tokens",
Expand All @@ -1660,7 +1669,8 @@
"note_context_enabled": "Click to disable note context: {{title}}",
"note_context_disabled": "Click to include current note in context",
"no_provider_message": "No AI provider configured. Add one to start chatting.",
"add_provider": "Add AI Provider"
"add_provider": "Add AI Provider",
"free": "Free"
},
"sidebar_chat": {
"title": "AI Chat",
Expand Down Expand Up @@ -2340,7 +2350,19 @@
"delete_provider_confirmation": "Are you sure you want to delete the provider \"{{name}}\"?",
"api_key": "API Key",
"api_key_placeholder": "Enter your API key",
"base_url": "Base URL",
"cancel": "Cancel",
"web_search_title": "Web Search Engine",
"web_search_description": "Choose which search engine the AI agent uses for web searches. Provider default uses the built-in search of each LLM provider (Anthropic, OpenAI, Google). Tavily and SearXNG work with all providers including Ollama.",
"web_search_engine": "Search engine",
"web_search_engine_description": "Select the search engine to use for AI web searches",
"web_search_provider_default": "Provider default (built-in)",
"tavily_api_key": "Tavily API key",
"tavily_api_key_description": "Get a free API key at tavily.com (1,000 searches/month free)",
"searxng_url": "SearXNG instance URL",
"searxng_url_description": "URL of your self-hosted SearXNG instance",
"search_timeout": "Search timeout (seconds)",
"search_timeout_description": "Maximum time to wait for web search results before timing out",
"mcp_title": "MCP (Model Context Protocol)",
"mcp_enabled": "MCP server",
"mcp_enabled_description": "Expose a Model Context Protocol (MCP) endpoint so that AI coding assistants (e.g. Claude Code, GitHub Copilot) can read and modify your notes. The endpoint is only accessible from localhost.",
Expand All @@ -2363,7 +2385,11 @@
"web_search": "Web search",
"note_in_parent": "<Note/> in <Parent/>",
"get_attachment": "Get attachment",
"get_attachment_content": "Read attachment content"
"get_attachment_content": "Read attachment content",
"rename_note": "Rename note",
"delete_note": "Delete note",
"move_note": "Move note",
"clone_note": "Clone note"
}
}
}
7 changes: 6 additions & 1 deletion apps/client/src/widgets/sidebar/SidebarChat.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,12 @@ export default function SidebarChat() {
/>
)}
{chat.messages.map(msg => (
<ChatMessage key={msg.id} message={msg} />
<ChatMessage
key={msg.id}
message={msg}
onApproveToolCall={chat.approveToolCall}
onRejectToolCall={chat.rejectToolCall}
/>
))}
{chat.isStreaming && chat.streamingThinking && (
<ChatMessage
Expand Down
96 changes: 96 additions & 0 deletions apps/client/src/widgets/type_widgets/llm_chat/ChatInputBar.css
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@
opacity: 0.4;
}

/* Stop-streaming button: tinted with the theme danger color
   (falls back to Bootstrap's #dc3545 when the variable is unset). */
.llm-chat-stop-btn {
color: var(--danger-color, #dc3545);
}

/* Model selector */
.llm-chat-model-selector {
display: flex;
Expand Down Expand Up @@ -167,3 +171,95 @@
margin: 0;
font-size: 0.9rem;
}

/* Knowledge base sources */

/* Container panel listing the notes attached as knowledge-base sources. */
.llm-chat-kb-sources {
display: flex;
flex-direction: column;
gap: 0.375rem;
padding: 0.5rem;
border: 1px solid var(--main-border-color);
border-radius: 6px;
background: var(--accented-background-color);
}

/* Small bold heading row above the source chips. */
.llm-chat-kb-header {
display: flex;
align-items: center;
gap: 0.375rem;
font-size: 0.8rem;
color: var(--muted-text-color);
font-weight: 600;
}

/* Chip row; wraps onto multiple lines when many sources are attached. */
.llm-chat-kb-chips {
display: flex;
flex-wrap: wrap;
gap: 0.25rem;
}

/* A single source chip: title plus remove button, capped at 200px wide. */
.llm-chat-kb-chip {
display: inline-flex;
align-items: center;
gap: 0.25rem;
padding: 0.125rem 0.375rem;
border-radius: 4px;
background: var(--main-background-color);
border: 1px solid var(--main-border-color);
font-size: 0.8rem;
max-width: 200px;
}

/* Truncate long note titles with an ellipsis instead of wrapping. */
.llm-chat-kb-chip-title {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}

/* Borderless "x" button inside a chip. */
.llm-chat-kb-chip-remove {
display: flex;
align-items: center;
justify-content: center;
background: none;
border: none;
cursor: pointer;
padding: 0;
color: var(--muted-text-color);
font-size: 0.9rem;
line-height: 1;
}

/* NOTE(review): fallback #d9534f differs from the #dc3545 used by
   .llm-chat-stop-btn — presumably unintentional; confirm and unify. */
.llm-chat-kb-chip-remove:hover:not(:disabled) {
color: var(--danger-color, #d9534f);
}

/* Dim the remove button while a removal is in flight / disallowed. */
.llm-chat-kb-chip-remove:disabled {
opacity: 0.5;
cursor: not-allowed;
}

/* Shrink the note-autocomplete input to match the panel's compact sizing. */
.llm-chat-kb-sources .note-autocomplete-container input {
font-size: 0.8rem;
padding: 0.25rem 0.5rem;
}

/* Position the autocomplete dropdown above the input */
.llm-chat-kb-autocomplete-wrapper {
position: relative;
margin-top: 0.25rem;
}

/* !important overrides the autocomplete library's inline positioning so the
   menu opens upward (bottom-anchored) instead of below the input. */
.llm-chat-kb-autocomplete-wrapper .aa-dropdown-menu {
position: absolute !important;
bottom: 100% !important;
top: auto !important;
left: 0;
right: 0;
max-height: 300px;
overflow: auto;
margin-bottom: 2px;
background-color: var(--main-background-color);
border: 1px solid var(--main-border-color);
border-radius: 6px;
z-index: 2000;
}
Loading
Loading