# Coverage for src/lilbee/cli/tui/screens/chat.py: 100% (915 statements)
# Report generated by coverage.py v7.13.4 at 2026-05-15 20:55 +0000.
1"""Chat screen: scrollable message log with streaming markdown responses."""
3from __future__ import annotations
5import asyncio
6import contextlib
7import logging
8import os
9import shlex
10import threading
11import time
12from collections.abc import Callable
13from pathlib import Path
14from typing import TYPE_CHECKING, Any, ClassVar
16from textual import events, getters, on, work
17from textual.actions import SkipAction
18from textual.app import ComposeResult
19from textual.binding import Binding, BindingType
20from textual.containers import Vertical, VerticalScroll
21from textual.content import Content
22from textual.css.query import NoMatches
23from textual.dom import DOMNode
24from textual.reactive import reactive
25from textual.screen import Screen
26from textual.widgets import Footer, Select, Static
28# Cancellation check for @work(thread=True) workers. Import at module level
29# since it's used in multiple methods.
30from textual.worker import get_current_worker as _get_worker
32from lilbee.app.services import get_services, reset_services, reset_store
33from lilbee.app.version import get_version
34from lilbee.cli.settings_map import SETTINGS_MAP
35from lilbee.cli.tui import messages as msg
36from lilbee.cli.tui.app import DARK_THEMES, LilbeeApp, apply_active_model
37from lilbee.cli.tui.screens.chat_helpers import (
38 build_add_progress_callback,
39 build_sync_progress_callback,
40 close_stream,
41 remove_copied_files,
42)
43from lilbee.cli.tui.thread_safe import call_from_thread
44from lilbee.cli.tui.widgets.arg_hint import ArgHintLine
45from lilbee.cli.tui.widgets.autocomplete import CompletionOverlay, get_completions
46from lilbee.cli.tui.widgets.chat_input import ChatInput
47from lilbee.cli.tui.widgets.help_hint import HelpHint
48from lilbee.cli.tui.widgets.message import AssistantMessage, UserMessage
49from lilbee.cli.tui.widgets.model_bar import ChatModeToggle, ModelBar, ModelPickerButton
50from lilbee.cli.tui.widgets.slash_command_catalog import SlashCommandCatalog
51from lilbee.cli.tui.widgets.status_bar import ViewTabs
52from lilbee.cli.tui.widgets.task_bar import TaskBar
53from lilbee.cli.tui.widgets.task_bar_controller import ProgressReporter
54from lilbee.core import settings
55from lilbee.core.config import cfg
56from lilbee.core.config.enums import ChatMode
57from lilbee.crawler import crawler_available, is_url, require_valid_crawl_url
58from lilbee.data.store import scope_to_chunk_type
59from lilbee.providers.model_ref import parse_model_ref
60from lilbee.retrieval.embedder import is_model_available
61from lilbee.retrieval.query import ChatMessage
62from lilbee.runtime import asyncio_loop
63from lilbee.runtime.progress import (
64 EventType,
65 ProgressEvent,
66)
68if TYPE_CHECKING:
69 from lilbee.cli.tui.widgets.task_bar_controller import TaskBarController
# Module-level logger; all diagnostics in this screen route through it.
log = logging.getLogger(__name__)

# Cap on retained chat turns — presumably bounds the prompt context sent to
# the model and the screen's memory use; confirm against _history consumers.
_MAX_HISTORY_MESSAGES = 200

# Treat the user as "still at the bottom" when within this many lines so a tiny
# stray scroll doesn't disable auto-follow during streaming.
_AUTO_SCROLL_TAIL_LINES = 5

# Coalesce per-token UI updates into ~50 ms windows. Tiny reasoning models can
# emit 100+ tokens/sec; one ``call_from_thread`` per token saturates Textual's
# message queue and makes key events visibly lag.
_STREAM_FLUSH_INTERVAL = 0.05

# Auto-scroll throttle. ~6 fps so heavy token streams don't peg the renderer.
_STREAM_SCROLL_INTERVAL = 0.15
class ChatWelcome(Static):
    """Empty-state welcome posted into the chat log; removed on first message."""

    def __init__(self, *, id: str | None = None) -> None:
        # Three styled lines (title / tagline / hint) assembled into a
        # single Content body so the welcome renders as one static widget.
        parts = (
            Content.styled(msg.CHAT_WELCOME_TITLE, "bold $primary"),
            Content.styled(msg.CHAT_WELCOME_TAGLINE, "$text-muted"),
            Content.styled(msg.CHAT_WELCOME_HINT, "$text-muted"),
        )
        super().__init__(
            Content.assemble(parts[0], "\n", parts[1], "\n\n", parts[2]),
            id=id,
        )
class PromptArea(Vertical):
    """Container for chat input that highlights on focus-within."""
class ChatScreen(Screen[None]):
    """Primary chat interface with streaming LLM responses."""

    # Lilbee always hosts screens on a LilbeeApp (production + LilbeeAppHost
    # in tests), so narrowing the type lets the screen call set_theme /
    # switch_view / task_bar without isinstance dance or # type: ignore.
    app: LilbeeApp  # type: ignore[assignment]

    CSS_PATH = "chat.tcss"
    # Widget focused on the screen's initial mount (Textual AUTO_FOCUS).
    AUTO_FOCUS = "#chat-input"

    # True while an LLM response is streaming into the log; watched below.
    streaming: reactive[bool] = reactive(False)

    HELP = (
        "# Chat\n\n"
        "Ask questions about your knowledge base.\n\n"
        "Press **Escape** for normal mode (vim keys), "
        "**i**/**a**/**o** to return to insert mode."
    )

    # Footer group that collapses the scroll bindings into one compact entry.
    _SCROLL_GROUP = Binding.Group("Scroll", compact=True)

    # Hot-path widget refs. ``getters.query_one`` is a typed class-level
    # descriptor that resolves via Textual's indexed DOM lookup on every
    # access. It is O(1) for id selectors, so no cache is needed.
    _chat_input = getters.query_one("#chat-input", ChatInput)
    _chat_log = getters.query_one("#chat-log", VerticalScroll)
    _completion_overlay = getters.query_one("#completion-overlay", CompletionOverlay)
    _arg_hint = getters.query_one("#arg-hint", ArgHintLine)

    BINDINGS: ClassVar[list[BindingType]] = [
        # `/` opens the slash-command line (Tab completes it -- the
        # adjacent `Tab Complete` hint spells that out). The label says
        # "Slash commands" rather than the bare "Commands" so the footer
        # tells the user what `/` actually does.
        Binding("slash", "focus_commands", "Slash commands", show=True),
        Binding("tab", "complete", "Complete", show=True, priority=True),
        Binding("ctrl+n", "complete_next", "Next match", show=False, priority=True),
        # Ctrl+P stays bound to the app's command palette by default. The
        # chat screen only intercepts it WHEN the dropdown is visible, via
        # LilbeeApp.action_command_palette overriding to call
        # ChatScreen.action_complete_prev. Action is exposed for direct
        # callers / tests; not bound here so the app-level priority binding
        # for ctrl+p (palette) wins by default.
        Binding("pageup", "scroll_up", "PgUp", show=False, group=_SCROLL_GROUP),
        Binding("pagedown", "scroll_down", "PgDn", show=False, group=_SCROLL_GROUP),
        Binding("ctrl+d", "half_page_down", "^d half PgDn", show=False, group=_SCROLL_GROUP),
        Binding("ctrl+u", "half_page_up", "^u half PgUp", show=False, group=_SCROLL_GROUP),
        Binding("j", "vim_scroll_down", "j down", show=False, group=_SCROLL_GROUP),
        Binding("k", "vim_scroll_up", "k up", show=False, group=_SCROLL_GROUP),
        Binding("g", "vim_scroll_home", "g top", show=False, group=_SCROLL_GROUP),
        Binding("G", "vim_scroll_end", "G bottom", show=False, group=_SCROLL_GROUP),
        # priority=True keeps history navigation fast-path winning over the
        # ChatInput's TextArea cursor_up/_down. Multi-line cursor movement
        # inside the prompt still works via PgUp/PgDn/Home/End.
        Binding("up", "history_prev", "Up", show=False, priority=True),
        Binding("down", "history_next", "Down", show=False, priority=True),
        # Esc always drops back into NORMAL mode so the user can navigate
        # the terminal. Cancel-while-streaming is on Ctrl+C below; the
        # two roles used to share Esc and clobbered each other.
        Binding("escape", "enter_normal_mode", "Normal mode", show=True, priority=True),
        # Ctrl+C cancels the active stream when streaming AND in INSERT
        # mode so the user can interrupt without leaving the input. The
        # screen-level priority binding overrides the App-level Quit;
        # check_action below hides + disables it outside that exact
        # context, so Ctrl+C still quits the app from NORMAL or when
        # nothing is streaming.
        Binding("ctrl+c", "cancel_stream", "Cancel stream", show=True, priority=True),
        Binding("ctrl+r", "toggle_markdown", "Markdown", show=False),
        # `m` is a NORMAL-mode shortcut to the model bar; in INSERT mode the
        # focused chat input types the literal "m". `check_action` hides it
        # from the footer there so the hint matches what the key does.
        Binding("m", "focus_model_bar", "Models", show=True),
        Binding("s", "cycle_scope", "Scope", show=False),
        # F2 opens the searchable list of every slash command
        # (SlashCommandCatalog) -- not the model catalog, which is `/models`.
        # Labeled "All commands" so it reads distinctly from `/ Slash commands`.
        Binding("f2", "show_command_catalog", "All commands", show=True, priority=True),
        Binding("f3", "toggle_chat_mode", "Search/Chat", show=False),
        Binding("f5", "open_setup", "Setup", show=False),
    ]
    def __init__(self) -> None:
        """Initialize per-screen state; command handlers are bound once here."""
        super().__init__()
        # Conversation turns for the model; guarded by _history_lock because
        # worker threads and the UI thread both touch it (see _cmd_clear).
        self._history: list[ChatMessage] = []
        self._history_lock = threading.Lock()
        # UI state: vim-style INSERT vs NORMAL, completion re-entrancy guard,
        # and the one-ingest-at-a-time flag used by /add.
        self._insert_mode: bool = True
        self._completing = False
        self._sync_active: bool = False
        # Previously submitted prompts for up/down recall; -1 = not browsing.
        self._input_history: list[str] = []
        self._history_index: int = -1
        self._command_handlers: dict[str, Callable[[str], None]] = self._build_command_handlers()
197 def _build_command_handlers(self) -> dict[str, Callable[[str], None]]:
198 """Bind every COMMANDS entry to its handler method on this instance.
200 Run once at construction so /handle_slash dispatches via direct method
201 reference (no per-call getattr-by-string-name reflection).
202 """
203 from lilbee.cli.tui.command_registry import COMMANDS
205 handlers: dict[str, Callable[[str], None]] = {}
206 for cmd in COMMANDS:
207 method = getattr(self, cmd.handler)
208 for name in (cmd.name, *cmd.aliases):
209 handlers[name] = method
210 return handlers
    @property
    def _task_bar(self) -> TaskBarController:
        """The app-level TaskBarController (always set by LilbeeApp)."""
        # Delegation property: keeps call sites short (self._task_bar.…).
        return self.app.task_bar
    def compose(self) -> ComposeResult:
        """Build the widget tree: tabs, chat log, overlay, prompt area, bars, footer."""
        # Local imports: these container widgets are only used here.
        from lilbee.cli.tui.widgets.bottom_bars import BottomBars
        from lilbee.cli.tui.widgets.scope_chip import ScopeChip
        from lilbee.cli.tui.widgets.top_bars import TopBars

        with TopBars():
            yield ViewTabs()
        # The scrolling log starts with the empty-state welcome widget.
        yield VerticalScroll(
            ChatWelcome(id="chat-welcome"),
            id="chat-log",
        )
        yield CompletionOverlay(id="completion-overlay")
        with BottomBars():
            with PromptArea(id="chat-prompt-area"):
                yield ScopeChip(id="scope-chip")
                yield ChatInput(
                    placeholder=msg.CHAT_INPUT_PLACEHOLDER_DEFAULT,
                    id="chat-input",
                )
                yield ArgHintLine(id="arg-hint")
            yield ModelBar(id="model-bar")
            yield TaskBar()
        yield HelpHint(id="help-hint")
        yield Footer()
    def on_mount(self) -> None:
        """One-time setup once the DOM is ready."""
        self._update_input_style()
        # Re-render model-dependent UI whenever app settings change.
        self.app.settings_changed_signal.subscribe(self, self._on_settings_changed)
        # First-run detection runs off the UI thread (see worker below).
        self._setup_check_worker()
247 @work(thread=True, name="chat_setup_check", exit_on_error=False)
248 def _setup_check_worker(self) -> None:
249 """Run ``_needs_setup`` off the UI thread; push the wizard if needed."""
250 if not self._needs_setup():
251 return
252 call_from_thread(self, self._push_setup_wizard)
254 def _push_setup_wizard(self) -> None:
255 """Push the SetupWizard if the screen is still mounted."""
256 if not self.is_mounted:
257 return
258 from lilbee.cli.tui.screens.setup import SetupWizard
260 self.app.push_screen(SetupWizard(), self._on_setup_complete)
    def on_show(self) -> None:
        """Screen became visible: dismiss the splash, refresh models, restore focus."""
        from lilbee.runtime.splash import dismiss

        dismiss()
        self.refresh_model_bar()
        # AUTO_FOCUS only fires once on initial mount. Re-entering the
        # screen via view-nav needs an explicit focus restore. In INSERT
        # mode we send focus to the chat input; in NORMAL mode we send
        # focus to the chat log (the input is intentionally unfocusable
        # so global bindings keep firing).
        with contextlib.suppress(Exception):
            if self._insert_mode:
                self._enter_insert_mode()
            else:
                self._chat_log.focus()
279 def _needs_setup(self) -> bool:
280 """True when the setup wizard should run: fresh data dir or unresolved models.
282 Remote-prefixed refs skip the native probe since they resolve
283 through the SDK backend at call time.
284 """
285 if not cfg.lancedb_dir.is_dir():
286 log.debug("_needs_setup: lancedb_dir missing (%s)", cfg.lancedb_dir)
287 return True
288 from lilbee.providers.base import ProviderError
289 from lilbee.providers.llama_cpp.provider import resolve_model_path
291 for label, model in (("chat", cfg.chat_model), ("embedding", cfg.embedding_model)):
292 if parse_model_ref(model).is_remote:
293 continue
294 try:
295 resolve_model_path(model)
296 except (ProviderError, KeyError, ValueError) as exc:
297 log.debug("_needs_setup: %s model %r unresolved: %s", label, model, exc)
298 return True
299 return False
301 def _embedding_ready(self) -> bool:
302 """Quick check if the embedding model resolves (no network calls)."""
303 return is_model_available(cfg.embedding_model, get_services().provider)
305 def _on_setup_complete(self, result: str | None) -> None:
306 """Called when wizard completes or is skipped."""
307 # Re-detect after setup so a freshly-set-up vault gets the hint.
308 self.app.task_bar.start_detect_pending()
309 self.refresh_model_bar()
311 def _on_settings_changed(self, payload: tuple[str, object]) -> None:
312 key, _value = payload
313 if key in {"chat_mode", "embedding_model"}:
314 self.refresh_model_bar()
316 def action_open_setup(self) -> None:
317 """Open the setup wizard."""
318 self._cmd_setup("")
    def _enter_insert_mode(self) -> None:
        """Switch to insert mode: focus input, update border style."""
        self._insert_mode = True
        # can_focus must be restored before focus(): NORMAL mode leaves the
        # input unfocusable so global bindings keep firing.
        self._chat_input.can_focus = True
        self._chat_input.focus()
        self._update_input_style()
327 def _update_input_style(self) -> None:
328 """Toggle input opacity and mode indicator based on current mode."""
329 inp = self._chat_input
330 if self._insert_mode:
331 inp.remove_class("normal-mode")
332 else:
333 inp.add_class("normal-mode")
334 self._update_mode_indicator()
336 def _update_mode_indicator(self) -> None:
337 """Update the ViewTabs mode text to reflect the current mode."""
338 with contextlib.suppress(NoMatches):
339 bar = self.query_one(ViewTabs)
340 bar.mode_text = msg.MODE_INSERT if self._insert_mode else msg.MODE_NORMAL
    def on_key(self, event: object) -> None:
        """Handle key events: vim mode and typing from chat log."""
        from textual.events import Key

        if not isinstance(event, Key):
            return
        inp = self._chat_input
        if self._insert_mode:
            # INSERT: a printable key pressed while the input lacks focus is
            # redirected into the input so typing "just works" from the log.
            if not inp.has_focus and event.is_printable and event.character:
                inp.focus()
                inp.insert(event.character)
                event.prevent_default()
                event.stop()
            return
        # NORMAL: Enter or i/a/o returns to INSERT mode.
        if event.key == "enter" or (event.character and event.character in "iao"):
            # Let a focused Select / picker button handle Enter / i / a / o itself.
            if isinstance(self.focused, (Select, ModelPickerButton)):
                return
            self._enter_insert_mode()
            event.prevent_default()
            event.stop()
            return
365 @on(events.DescendantFocus, "#chat-input")
366 def _on_chat_input_focused(self, event: events.DescendantFocus) -> None:
367 """Mark INSERT mode whenever the chat input takes focus.
369 With ``can_focus = False`` while in NORMAL mode, the only way the
370 input gains focus is via an explicit user action (click, or the
371 :meth:`_enter_insert_mode` helper that sets ``can_focus = True``
372 and focuses the input). Either path implies INSERT, so we sync
373 the screen mode here.
374 """
375 if not self._insert_mode:
376 self._enter_insert_mode()
378 @on(events.Click, "#chat-input")
379 def _on_chat_input_clicked(self, event: events.Click) -> None:
380 """Click on the chat input bar promotes to INSERT.
382 ``can_focus = False`` while in NORMAL mode swallows focus from the
383 click, so DescendantFocus never fires. Hook the Click directly so
384 a mouse user lands in INSERT just like a keystroke (i / a / o).
385 """
386 if not self._insert_mode:
387 self._enter_insert_mode()
388 event.stop()
390 def on_click(self, event: events.Click) -> None:
391 """Click outside the chat input bar drops back to NORMAL.
393 The chat-input click handler above promotes to INSERT; the
394 symmetric exit happens here so a mouse user gets the same
395 click-to-blur behavior they expect from any other text editor.
396 """
397 if not self._insert_mode:
398 return
399 if event.widget is None:
400 return
401 chat_input = self._chat_input
402 node: DOMNode | None = event.widget
403 while node is not None:
404 if node is chat_input:
405 return
406 node = node.parent
407 self.action_enter_normal_mode()
    @on(ChatInput.Submitted, "#chat-input")
    def _on_chat_submitted(self, event: ChatInput.Submitted) -> None:
        """Handle Enter in the prompt: dispatch a slash command or send a message."""
        if not self._insert_mode:
            # Vim-style: Enter in normal mode flips back to insert without
            # submitting whatever empty / stale text the input still holds.
            self._enter_insert_mode()
            return
        if self.streaming:
            # Only one chat message may be in flight at a time. Surface a
            # toast so the user knows the prompt was rejected (rather
            # than silently dropped) and ask them to cancel first if
            # they want to redirect the model.
            self.notify(msg.CHAT_BUSY, severity="warning", timeout=3)
            return
        # Enter when the completion dropdown is showing a different
        # selection than the input itself: accept the highlight first
        # (matches Tab's cycle-and-insert behavior) instead of submitting
        # whatever bare prefix the user typed.
        if self._accept_overlay_selection_on_enter():
            return
        text = event.value.strip()
        if not text:
            return
        # Clear the input and record the prompt for up/down history recall.
        event.chat_input.value = ""
        self._input_history.append(text)
        self._history_index = -1

        if text.startswith("/"):
            self._handle_slash(text)
            return
        self._send_message(text)
    def _accept_overlay_selection_on_enter(self) -> bool:
        """Accept the highlight as ``<selection> ``; True if Enter was consumed."""
        overlay = self._completion_overlay
        if not overlay.is_visible:
            return False
        selection = overlay.get_current()
        inp = self._chat_input
        if not selection or selection == inp.value.rstrip():
            # Nothing highlighted, or the input already matches: just close.
            overlay.hide()
            return False
        # When completing an argument, keep the "/cmd " prefix already typed.
        cmd_prefix = inp.value.split()[0] + " " if " " in inp.value else ""
        # _completing presumably suppresses the value-changed handler from
        # re-opening the overlay — confirm against ChatInput's change handling.
        self._completing = True
        inp.value = f"{cmd_prefix}{selection} "
        self._completing = False
        inp.action_end()
        overlay.hide()
        return True
459 def _handle_slash(self, text: str) -> None:
460 """Dispatch slash commands via the per-instance handler registry."""
461 cmd = text.split()[0].lower()
462 args = text[len(cmd) :].strip()
463 handler = self._command_handlers.get(cmd)
464 if handler is not None:
465 handler(args)
466 else:
467 self.notify(msg.CMD_UNKNOWN.format(cmd=cmd), severity="warning")
    def _set_streaming(self, value: bool) -> None:
        """Main-thread setter so worker-thread paths can route through ``call_from_thread``."""
        # Assigning the reactive triggers watch_streaming on the UI thread.
        self.streaming = value
473 def watch_streaming(self, streaming: bool) -> None:
474 if streaming:
475 self._enter_streaming_state()
476 else:
477 self._exit_streaming_state()
    def _enter_streaming_state(self) -> None:
        """Apply the CSS streaming state and re-evaluate key bindings."""
        self.add_class("streaming")
        # Cancel + finalize both write streaming=False; reactive dedupe
        # keeps the watcher a no-op on equal values.
        self.refresh_bindings()
    def _exit_streaming_state(self) -> None:
        """Clear the CSS streaming state and re-evaluate key bindings."""
        self.remove_class("streaming")
        self.refresh_bindings()
    def _cmd_add(self, args: str) -> None:
        """Handle /add: copy local files into documents_dir and sync, or crawl a URL."""
        if not args:
            return
        if self._sync_active:
            # One ingest at a time; a concurrent add would race the running sync.
            self.notify(msg.SYNC_ALREADY_ACTIVE, severity="warning")
            return
        if is_url(args):
            # A URL argument delegates to the crawl path instead of file copy.
            self._cmd_crawl(args)
            return
        # Platform-aware shell parsing: POSIX rules treat backslashes as
        # escapes, so a Windows path like C:\Users\foo gets mangled to
        # C:Usersfoo. shlex(posix=False) keeps backslashes literal but
        # leaves surrounding quotes attached to tokens, so trim those
        # before constructing Path objects.
        try:
            tokens = shlex.split(args, posix=os.name != "nt")
        except ValueError as exc:
            self.notify(str(exc), severity="error")
            return
        if os.name == "nt":
            tokens = [t.strip('"').strip("'") for t in tokens]
        paths = [Path(token).expanduser() for token in tokens]
        missing = [p for p in paths if not p.exists()]
        if missing:
            self.notify(
                msg.CMD_ADD_NOT_FOUND.format(path=", ".join(str(p) for p in missing)),
                severity="error",
            )
            return
        # Directory adds are whole-tree copies handled by copy_files'
        # recursion; a same-named subdir in documents_dir is not a clean
        # "duplicate file" signal, so skip the prompt there and let
        # copy_files emit its per-file skipped notices.
        duplicates = [p for p in paths if p.is_file() and (cfg.documents_dir / p.name).exists()]
        if duplicates:
            self._prompt_overwrite(paths, duplicates)
            return
        self._submit_add(paths, force=False)
528 def _prompt_overwrite(self, paths: list[Path], duplicates: list[Path]) -> None:
529 """Ask to overwrite existing copies before re-syncing."""
530 from lilbee.cli.tui.widgets.confirm_dialog import ConfirmDialog
532 names = ", ".join(p.name for p in duplicates)
534 def _on_confirm(confirmed: bool | None) -> None:
535 if not confirmed:
536 self.notify(msg.CMD_ADD_SKIPPED_DUPLICATE.format(name=names))
537 return
538 self._submit_add(paths, force=True)
540 self.app.push_screen(
541 ConfirmDialog(
542 msg.CMD_ADD_DUPLICATE_TITLE,
543 msg.CMD_ADD_DUPLICATE_MESSAGE.format(name=names),
544 ),
545 _on_confirm,
546 )
    def _submit_add(self, paths: list[Path], *, force: bool) -> None:
        """Spawn the add worker. Separated so overwrite confirm can reuse it."""
        from lilbee.cli.tui.task_queue import TaskType

        self._sync_active = True
        label = paths[0].name if len(paths) == 1 else f"{len(paths)} files"

        def _target(reporter: ProgressReporter) -> None:
            # Always clear the busy flag, even when the add fails or is cancelled.
            try:
                self._do_add(paths, reporter, force=force)
            finally:
                self._sync_active = False

        self._task_bar.start_task(f"Add {label}", TaskType.ADD, _target, indeterminate=True)
    def _do_add(
        self, paths: list[Path], reporter: ProgressReporter, *, force: bool = False
    ) -> None:
        """Copy files and run sync. Called on worker thread with a reporter."""
        from lilbee.app.ingest import copy_files
        from lilbee.data.ingest import sync

        label = paths[0].name if len(paths) == 1 else f"{len(paths)} files"
        reporter.update(0, f"Copying {label}...", indeterminate=True)
        copy_result = copy_files(paths, force=force)
        copied = copy_result.copied
        # Per-file skip notices go back to the UI thread as toasts.
        for name in copy_result.skipped:
            call_from_thread(self, self.notify, f"{name} already exists (use --force to overwrite)")
        reporter.update(0, f"Copied {len(copied)} file(s), syncing...", indeterminate=True)

        try:
            sync_result = asyncio_loop.run(
                sync(quiet=True, on_progress=build_add_progress_callback(reporter))
            )
        except BaseException:
            # On cancel or any failure, remove the files we copied into
            # documents/ so the next sync doesn't silently re-ingest the
            # file the user just cancelled. Only files copied by
            # this /add invocation are removed; pre-existing files the user
            # put in documents/ themselves are never touched.
            remove_copied_files(copied)
            raise
        if sync_result.failed:
            remove_copied_files(copied)
            raise RuntimeError(msg.SYNC_FAILED_FILES.format(files=", ".join(sync_result.failed)))
        if sync_result.skipped:
            remove_copied_files(copied)
            raise RuntimeError(msg.sync_skipped_message(", ".join(sync_result.skipped)))
        call_from_thread(self, self.notify, msg.CMD_ADD_SUCCESS.format(count=len(copied)))
598 def _cmd_cancel(self, _args: str) -> None:
599 for worker in self.workers:
600 worker.cancel()
601 self.notify(msg.CMD_CANCEL)
    def _cmd_clear(self, _args: str) -> None:
        """Handle /clear: cancel workers, wipe the log, and reset model history."""
        for worker in self.workers:
            worker.cancel()
        self.streaming = False
        chat_log = self._chat_log
        chat_log.remove_children()
        # Lock: _history is also touched from worker threads.
        with self._history_lock:
            self._history.clear()
        self.notify(msg.CMD_CLEAR)
613 def _cmd_crawl(self, args: str) -> None:
614 if not crawler_available():
615 self.notify(msg.CMD_CRAWL_UNAVAILABLE, severity="error")
616 return
617 if not args:
618 self._open_crawl_dialog()
619 return
620 parts = args.split()
621 url = parts[0]
622 if not is_url(url):
623 url = f"https://{url}"
624 try:
625 require_valid_crawl_url(url)
626 except ValueError as exc:
627 self.notify(str(exc), severity="error")
628 return
629 depth, max_pages, include_subdomains = self._parse_crawl_flags(parts[1:])
630 self._start_crawl(url, depth, max_pages, include_subdomains=include_subdomains)
632 def _open_crawl_dialog(self) -> None:
633 """Push the crawl modal and handle its result."""
634 from lilbee.cli.tui.widgets.crawl_dialog import CrawlDialog, CrawlParams
636 def _on_result(result: CrawlParams | None) -> None:
637 if result is not None:
638 self._start_crawl(result.url, result.depth, result.max_pages)
640 self.app.push_screen(CrawlDialog(), callback=_on_result)
    def _start_crawl(
        self,
        url: str,
        depth: int | None,
        max_pages: int | None,
        *,
        include_subdomains: bool = False,
    ) -> None:
        """Enqueue a crawl task and run it in the background.

        Bootstrap Chromium first via the controller helper. If the
        browser isn't installed yet, a SETUP task renders in the Task
        Center and the crawl kicks off from its on_success hook. On a
        machine where Chromium is already present this is a synchronous
        no-op and the crawl starts immediately (bb-wq8g).
        """
        from lilbee.cli.tui.task_queue import TaskType

        def _kick_off_crawl() -> None:
            self._task_bar.start_task(
                msg.TASK_NAME_CRAWL.format(url=url),
                TaskType.CRAWL,
                lambda reporter: self._do_crawl(
                    url, depth, max_pages, reporter, include_subdomains=include_subdomains
                ),
                # A successful crawl triggers a sync back on the UI thread.
                on_success=lambda: call_from_thread(self, self._run_sync),
            )

        self.notify(msg.CMD_CRAWL_STARTED.format(url=url))
        self._task_bar.ensure_chromium(_kick_off_crawl)
673 @staticmethod
674 def _parse_crawl_flags(tokens: list[str]) -> tuple[int | None, int | None, bool]:
675 """Extract --depth, --max-pages, and --include-subdomains from tokens.
677 Numeric flags return None when absent so the caller inherits
678 crawl_and_save's unbounded-by-default semantics. The boolean
679 ``--include-subdomains`` flag defaults to False (exact-host scope).
680 """
681 flag_map = {"--depth": "depth", "--max-pages": "max_pages"}
682 parsed: dict[str, int | None] = {"depth": None, "max_pages": None}
683 include_subdomains = False
684 i = 0
685 while i < len(tokens):
686 if tokens[i] == "--include-subdomains":
687 include_subdomains = True
688 i += 1
689 continue
690 key = flag_map.get(tokens[i])
691 if key and i + 1 < len(tokens):
692 with contextlib.suppress(ValueError):
693 parsed[key] = int(tokens[i + 1])
694 i += 2
695 else:
696 i += 1
697 return parsed["depth"], parsed["max_pages"], include_subdomains
    def _do_crawl(
        self,
        url: str,
        depth: int | None,
        max_pages: int | None,
        reporter: ProgressReporter,
        *,
        include_subdomains: bool = False,
    ) -> None:
        """Crawl body. Runs on worker thread; reporter handles progress + cancel."""
        from lilbee.crawler import crawl_and_save
        from lilbee.runtime.progress import CrawlPageEvent, SetupProgressEvent

        reporter.update(0, msg.CMD_CRAWL_STARTED.format(url=url))

        def on_progress(event_type: EventType, data: ProgressEvent) -> None:
            # Translate crawl/setup progress events into task-bar updates.
            if event_type == EventType.SETUP_START:
                reporter.update(0, msg.SETUP_CHROMIUM_NAME)
            elif event_type == EventType.SETUP_PROGRESS and isinstance(data, SetupProgressEvent):
                if data.total_bytes:
                    pct = int(data.downloaded_bytes * 100 / data.total_bytes)
                    detail = msg.SETUP_CHROMIUM_DETAIL.format(
                        done=data.downloaded_bytes // (1024 * 1024),
                        total=data.total_bytes // (1024 * 1024),
                    )
                else:
                    # Unknown download size: show downloaded MB only.
                    pct = 0
                    detail = msg.SETUP_CHROMIUM_DETAIL_UNKNOWN.format(
                        done=data.downloaded_bytes // (1024 * 1024),
                    )
                reporter.update(pct, detail)
            elif event_type == EventType.CRAWL_PAGE and isinstance(data, CrawlPageEvent):
                # Discovery hasn't resolved a sitemap yet (data.total <= 0):
                # show the indeterminate spinner with a count, not a parked
                # 50% bar that looks frozen. Switch to a determinate bar as
                # soon as the total is known.
                if data.total > 0:
                    pct = int(data.current * 100 / data.total)
                    reporter.update(
                        pct,
                        msg.CMD_CRAWL_PAGE.format(
                            current=data.current, total=data.total, url=data.url
                        ),
                        indeterminate=False,
                    )
                else:  # pragma: no cover - live crawl without sitemap
                    reporter.update(
                        0,
                        msg.CMD_CRAWL_PAGE_INDETERMINATE.format(current=data.current, url=data.url),
                        indeterminate=True,
                    )

        paths = asyncio_loop.run(
            crawl_and_save(
                url,
                depth=depth,
                max_pages=max_pages,
                on_progress=on_progress,
                quiet=True,
                include_subdomains=include_subdomains,
            )
        )
        call_from_thread(self, self.notify, msg.CMD_CRAWL_SUCCESS.format(count=len(paths), url=url))
763 def _cmd_catalog(self, _args: str) -> None:
764 self.app.switch_view("Catalog")
765 from lilbee.cli.tui.screens.catalog import CatalogScreen
767 self.app.push_screen(CatalogScreen())
769 def _cmd_delete(self, args: str) -> None:
770 """Run /delete in a worker so the chat screen stays interactive."""
771 self._cmd_delete_worker(args.strip())
    @work(thread=True, name="chat_cmd_delete", exit_on_error=False)
    def _cmd_delete_worker(self, name: str) -> None:
        """Validate and execute /delete off the UI thread; notify back via dispatch."""
        try:
            sources = get_services().store.get_sources()
        except Exception:
            # Listing can fail (store unavailable); treat as "no documents".
            log.debug("Failed to list documents for /delete", exc_info=True)
            call_from_thread(self, self.notify, msg.CMD_DELETE_NO_DOCS, severity="warning")
            return

        known = {s.get("filename", s.get("source", "?")) for s in sources}
        if not known:
            call_from_thread(self, self.notify, msg.CMD_DELETE_NO_DOCS, severity="warning")
            return

        if not name:
            # Bare /delete lists the deletable names instead of erroring.
            usage = msg.CMD_DELETE_USAGE.format(names=", ".join(sorted(known)))
            call_from_thread(self, self.notify, usage)
            return

        if name not in known:
            call_from_thread(
                self,
                self.notify,
                msg.CMD_DELETE_NOT_FOUND.format(name=name),
                severity="error",
            )
            return

        store = get_services().store
        # Remove both the document's chunks and its source record.
        store.delete_by_source(name)
        store.delete_source(name)
        from lilbee.cli.tui.widgets.autocomplete import invalidate_document_cache

        # Autocomplete caches document names; drop the now-stale entry.
        invalidate_document_cache()
        call_from_thread(self, self.notify, msg.CMD_DELETE_SUCCESS.format(name=name))
    def _cmd_help(self, _args: str) -> None:
        """Handle /help: open the searchable slash-command catalog."""
        self.action_show_command_catalog()
    def action_show_command_catalog(self) -> None:
        """Push the slash-command catalog modal; selected name is inserted into the input."""
        # _on_catalog_pick receives the chosen command name, or None on dismiss.
        self.app.push_screen(SlashCommandCatalog(), self._on_catalog_pick)
817 def insert_slash_command(self, name: str) -> None:
818 """Drop ``name + ' '`` into the chat input and focus it for argument entry."""
819 self._enter_insert_mode()
820 inp = self._chat_input
821 inp.value = f"{name} "
822 inp.action_end()
824 def _on_catalog_pick(self, name: str | None) -> None:
825 if name is None:
826 return
827 self.insert_slash_command(name)
829 def _cmd_login(self, args: str) -> None:
830 token = args.strip()
831 if not token:
832 import webbrowser
834 webbrowser.open("https://huggingface.co/settings/tokens")
835 self.notify(msg.CHAT_LOGIN_PROMPT)
836 return
837 self._run_hf_login(token)
    @work(thread=True)
    def _run_hf_login(self, token: str) -> None:
        """Log into HuggingFace on a worker thread; notify success or failure."""
        try:
            # Import inside the try: a missing/broken huggingface_hub is
            # reported the same way as a rejected token.
            from huggingface_hub import login

            login(token=token, add_to_git_credential=False)
            call_from_thread(self, self.notify, msg.CHAT_LOGGED_IN)
        except Exception as exc:
            log.warning("HuggingFace login failed", exc_info=True)
            call_from_thread(
                self, self.notify, msg.CHAT_LOGIN_FAILED.format(error=exc), severity="error"
            )
852 def _cmd_model(self, args: str) -> None:
853 if args:
854 apply_active_model(self.app, "chat_model", args)
855 self.app.title = f"lilbee -- {cfg.chat_model}"
856 self.notify(msg.CMD_MODEL_SET.format(name=cfg.chat_model))
857 self.apply_model_change()
858 self.refresh_model_bar()
859 else:
860 from lilbee.cli.tui.screens.catalog import CatalogScreen
862 self.app.push_screen(CatalogScreen())
864 def _cmd_quit(self, _args: str) -> None:
865 self.app.exit()
867 def _cmd_remove(self, args: str) -> None:
868 name = args.strip()
869 if not name:
870 self.notify(msg.CMD_REMOVE_USAGE, severity="warning")
871 return
872 self._run_remove_model(name)
    @work(thread=True)
    def _run_remove_model(self, name: str) -> None:
        """Remove an installed model on a worker thread.

        All user feedback goes through ``call_from_thread`` because this
        runs off the UI thread. Three outcomes: not installed (error),
        removed (success), or remove returned falsy / raised (failure).
        """
        mgr = get_services().model_manager
        if not mgr.is_installed(name):
            call_from_thread(
                self, self.notify, msg.CMD_REMOVE_NOT_FOUND.format(name=name), severity="error"
            )
            return
        try:
            removed = mgr.remove(name)
            if removed:
                call_from_thread(self, self.notify, msg.CMD_REMOVE_SUCCESS.format(name=name))
            else:
                call_from_thread(
                    self, self.notify, msg.CMD_REMOVE_FAILED.format(name=name), severity="error"
                )
        except Exception:
            # Same user-facing failure message whether remove() raised or
            # returned falsy; the log keeps the traceback.
            log.warning("Remove failed for %s", name, exc_info=True)
            call_from_thread(
                self, self.notify, msg.CMD_REMOVE_FAILED.format(name=name), severity="error"
            )
896 def _cmd_reset(self, args: str) -> None:
897 from lilbee.cli.tui.widgets.confirm_dialog import ConfirmDialog
899 def _on_confirm(confirmed: bool | None) -> None:
900 if not confirmed:
901 return
902 from lilbee.app.reset import perform_reset
904 try:
905 result = perform_reset()
906 except Exception as exc:
907 log.warning("Reset failed", exc_info=True)
908 self.notify(msg.CMD_RESET_FAILED.format(error=exc), severity="error")
909 return
911 # Reopen LanceDB against the now-empty data dir; keep providers loaded.
912 reset_store()
914 if result.skipped:
915 self.notify(
916 msg.CMD_RESET_PARTIAL.format(skipped=len(result.skipped)),
917 severity="warning",
918 )
919 else:
920 self.notify(msg.CMD_RESET_SUCCESS)
922 self.app.push_screen(
923 ConfirmDialog("Reset Knowledge Base", "This will permanently delete all data."),
924 _on_confirm,
925 )
    def _cmd_set(self, args: str) -> None:
        """Slash command ``/set KEY [VALUE]``: update a setting and persist it.

        Parsing order matters: the ``bool`` branch is checked before the
        nullable branch, so a nullable bool setting is parsed as a bool,
        never as ``None``.
        """
        if not args:
            return
        parts = args.split(None, 1)
        key = parts[0]
        value = parts[1] if len(parts) > 1 else ""

        if key not in SETTINGS_MAP:
            self.notify(msg.CMD_SET_UNKNOWN.format(key=key), severity="warning")
            return

        defn = SETTINGS_MAP[key]
        if not defn.writable:
            self.notify(msg.CMD_SET_READONLY.format(key=key), severity="warning")
            return
        try:
            if defn.type is bool:
                # Anything outside this set (including "") parses as False.
                parsed = value.lower() in ("true", "1", "yes", "on")
            elif defn.nullable and value.lower() in ("none", "null", ""):
                parsed = None
            else:
                parsed = defn.type(value)
            setattr(cfg, key, parsed)
            # None is persisted as the empty string.
            persisted = str(parsed) if parsed is not None else ""
            settings.set_value(cfg.data_root, key, persisted)
            if key == "llm_provider":  # pragma: no cover
                reset_services()
            self.notify(msg.CMD_SET_SUCCESS.format(key=key, value=parsed))
        except (ValueError, TypeError) as exc:
            self.notify(msg.CMD_SET_INVALID.format(key=key, error=exc), severity="error")
958 def _cmd_settings(self, _args: str) -> None:
959 self.app.switch_view("Settings")
961 def _cmd_setup(self, _args: str) -> None:
962 from lilbee.cli.tui.screens.setup import SetupWizard
964 self.app.push_screen(SetupWizard(), self._on_setup_complete)
966 def _cmd_status(self, _args: str) -> None:
967 self.app.switch_view("Status")
969 def _cmd_theme(self, args: str) -> None:
970 if args:
971 self.app.set_theme(args)
972 self.notify(msg.THEME_SET.format(name=args))
973 else:
974 theme_list = msg.CMD_THEME_LIST.format(names=", ".join(DARK_THEMES))
975 self.notify(theme_list, severity="information")
977 def _cmd_version(self, _args: str) -> None:
978 self.notify(msg.CHAT_VERSION.format(version=get_version()))
980 def _cmd_wiki(self, _args: str) -> None:
981 if not cfg.wiki:
982 self.notify(msg.CMD_WIKI_DISABLED, severity="warning")
983 return
984 self.app.switch_view("Wiki")
    def _send_message(self, text: str) -> None:
        """Send a user message and stream the response.

        Mount order matters: user bubble, then an empty assistant bubble,
        then scroll — so the stream worker has a widget to append to before
        the first token arrives.
        """
        from textual.css.query import NoMatches

        # NOTE: this local deliberately shadows the module-level logger; here
        # `log` is the chat log widget for the rest of the method.
        log = self._chat_log
        with contextlib.suppress(NoMatches):
            log.query_one("#chat-welcome", ChatWelcome).remove()
        log.mount(UserMessage(text))

        # The assistant bubble owns its own ThinkingHeader animator until
        # the first reasoning or content token swaps it out.
        assistant_msg = AssistantMessage()
        log.mount(assistant_msg)
        log.scroll_end(animate=False)

        # History is shared with the stream worker thread; mutate under lock.
        with self._history_lock:
            self._history.append({"role": "user", "content": text})
        self.streaming = True
        self._stream_response(text, assistant_msg, self._current_chunk_type())
1006 def _current_chunk_type(self) -> str | None:
1007 """Translate the ScopeChip selection into a ``chunk_type`` arg.
1009 Returns ``None`` for "both" (no filter) and the raw/wiki string
1010 otherwise. Defaults to ``None`` when the ScopeChip isn't mounted
1011 (e.g. test apps that compose the screen without it).
1012 """
1013 from textual.css.query import NoMatches
1015 from lilbee.cli.tui.widgets.scope_chip import ScopeChip
1017 try:
1018 chip = self.query_one("#scope-chip", ScopeChip)
1019 except NoMatches:
1020 return None
1021 return scope_to_chunk_type(chip.scope)
    @work(thread=True)
    def _stream_response(
        self, question: str, widget: AssistantMessage, chunk_type: str | None
    ) -> None:
        """Stream LLM response in a background thread, coalescing UI updates.

        The history snapshot excludes the just-appended user turn (``[:-1]``)
        because *question* is passed separately. ``_finalize_stream`` always
        runs, even on error, so the streaming flag and history stay coherent.
        """
        response_parts: list[str] = []
        # NOTE(review): `sources` is never populated in this method, so
        # widget.finish() always receives an empty list here — confirm the
        # searcher embeds sources in the token stream instead.
        sources: list[str] = []
        stream: Any = None
        try:
            with self._history_lock:
                history_snapshot = self._history[:-1]
            stream = get_services().searcher.ask_stream(
                question, history=history_snapshot, chunk_type=chunk_type
            )
            self._consume_stream(stream, widget, response_parts)
        except Exception as exc:
            log.debug("Stream error", exc_info=True)
            # suppress: the widget may already be gone if the app is closing.
            with contextlib.suppress(Exception):
                call_from_thread(self, widget.append_content, msg.STREAM_ERROR.format(error=exc))
        finally:
            close_stream(stream)
            self._finalize_stream(widget, sources, response_parts)
    def _consume_stream(
        self, stream: Any, widget: AssistantMessage, response_parts: list[str]
    ) -> None:
        """Pull tokens off *stream*, batching UI updates to ~50 ms windows."""
        worker = _get_worker()
        reason_buf: list[str] = []
        content_buf: list[str] = []
        # Mutable two-slot list so the nested flush/scroll throttling can
        # update timestamps in place: [last_flush, last_scroll].
        timings = [time.monotonic(), 0.0]  # [last_flush, last_scroll]

        def flush() -> None:
            # Drain both buffers to the widget on the main thread.
            if reason_buf:
                call_from_thread(self, widget.append_reasoning, "".join(reason_buf))
                reason_buf.clear()
            if content_buf:
                call_from_thread(self, widget.append_content, "".join(content_buf))
                content_buf.clear()

        for token in stream:
            if worker.is_cancelled:
                break
            try:
                self._buffer_token(token, reason_buf, content_buf, response_parts)
                self._maybe_flush_and_scroll(flush, timings)
            except Exception:
                break  # App shutting down (Ctrl-C) -- stop streaming
        # Final flush delivers whatever is still buffered; suppressed because
        # the UI may already be torn down at this point.
        with contextlib.suppress(Exception):
            flush()
1074 @staticmethod
1075 def _buffer_token(
1076 token: Any,
1077 reason_buf: list[str],
1078 content_buf: list[str],
1079 response_parts: list[str],
1080 ) -> None:
1081 """Append *token* to the right buffer; record response content for history."""
1082 if token.is_reasoning:
1083 reason_buf.append(token.content)
1084 elif token.content:
1085 response_parts.append(token.content)
1086 content_buf.append(token.content)
    def _maybe_flush_and_scroll(self, flush: Callable[[], None], timings: list[float]) -> None:
        """Run *flush* and the auto-scroll on their respective intervals.

        *timings* is the caller's two-slot list [last_flush, last_scroll];
        slots are updated in place so the throttling state persists across
        calls without extra attributes.
        """
        now = time.monotonic()
        if now - timings[0] >= _STREAM_FLUSH_INTERVAL:
            flush()
            timings[0] = now
        if now - timings[1] >= _STREAM_SCROLL_INTERVAL:
            # Scrolling touches widgets, so it must hop to the main thread.
            call_from_thread(self, self._scroll_to_bottom)
            timings[1] = now
    def _finalize_stream(
        self, widget: AssistantMessage, sources: list[str], response_parts: list[str]
    ) -> None:
        """Persist the assistant turn and update the widget. Always runs."""
        # _stream_response runs in a worker thread; reactive setters mutate
        # widgets, so the streaming flag must flip on the main thread.
        call_from_thread(self, self._set_streaming, False)
        full_response = "".join(response_parts)
        if full_response:
            with self._history_lock:
                self._history.append({"role": "assistant", "content": full_response})
                self._trim_history()
        call_from_thread(self, widget.finish, sources)
        call_from_thread(self, self._scroll_to_bottom)
        # In search mode a response without a "Sources:" section means the
        # retriever found nothing; surface that as a warning.
        if (
            cfg.chat_mode == ChatMode.SEARCH.value
            and self._embedding_ready()
            and full_response
            and "\n\nSources:\n" not in full_response
        ):
            call_from_thread(self, self._notify_no_results)
1120 def _notify_no_results(self) -> None:
1121 self.notify(msg.CHAT_MODE_SEARCH_NO_RESULTS, severity="warning")
1123 def _trim_history(self) -> None:
1124 """Trim history to max size, dropping oldest messages. Caller must hold _history_lock."""
1125 if len(self._history) > _MAX_HISTORY_MESSAGES:
1126 self._history[:] = self._history[-_MAX_HISTORY_MESSAGES:]
1128 def _scroll_to_bottom(self) -> None:
1129 log_widget = self._chat_log
1130 # Only auto-scroll while the user is still tailing the output.
1131 # If they scrolled up to read, don't yank them back.
1132 if log_widget.max_scroll_y - log_widget.scroll_y < _AUTO_SCROLL_TAIL_LINES:
1133 log_widget.scroll_end(animate=False)
1135 def action_scroll_up(self) -> None:
1136 self._chat_log.scroll_page_up()
1138 def action_scroll_down(self) -> None:
1139 self._chat_log.scroll_page_down()
1141 def check_action(self, action: str, parameters: tuple[object, ...]) -> bool | None:
1142 """Keep the footer honest about mode-dependent bindings.
1144 - ``cancel_stream`` (Ctrl+C) only does something while streaming in
1145 INSERT mode; otherwise the App's Quit binding takes the slot.
1146 - ``focus_model_bar`` (``m``) is a NORMAL-mode shortcut; in INSERT
1147 mode the focused chat input types the literal character, so the
1148 ``m Models`` hint would lie.
1149 """
1150 if action == "cancel_stream":
1151 return self.streaming and self._insert_mode
1152 if action == "focus_model_bar":
1153 return not self._insert_mode
1154 return super().check_action(action, parameters)
    def action_enter_normal_mode(self) -> None:
        """Esc dismisses the overlay if visible; otherwise drops into NORMAL mode."""
        overlay = self._completion_overlay
        if overlay.is_visible:
            overlay.hide()
            return
        if isinstance(self.focused, (Select, ModelPickerButton)):
            # Returning from a model picker should put us back in INSERT
            # so the user can type their next prompt; routing through the
            # helper makes sure can_focus is re-enabled.
            self._enter_insert_mode()
            return
        self._insert_mode = False
        # Make the chat input unfocusable in NORMAL mode so Tab traversal
        # skips past it AND a programmatic focus restore (modal close,
        # screen pop) cannot land on it. The user re-enters INSERT
        # explicitly via i/a/o/Enter or by clicking the input.
        self._chat_input.can_focus = False
        self._chat_log.focus()
        self._update_input_style()
1177 def action_cancel_stream(self) -> None:
1178 """Cancel an in-flight chat stream. Bound to Ctrl+C from INSERT mode."""
1179 if self.streaming:
1180 self._cancel_inflight_stream()
    def _cancel_inflight_stream(self) -> None:
        """Stop the streaming Textual worker AND interrupt its inference call.

        Cancelling the Textual worker alone unwinds the producer task but
        does not reach into the chat subprocess; the worker subprocess
        keeps generating until ``Services.cancel_inference()`` flips its
        abort flag (or sets the in-process Event in fallback mode).
        """
        get_services().cancel_inference()
        # Cancel every worker on this screen; the stream worker is among them.
        for worker in self.workers:
            worker.cancel()
        self.streaming = False
1195 def apply_model_change(self) -> None:
1196 """Cancel active stream (if any) and reset services for the new model."""
1197 if self.streaming:
1198 self.action_cancel_stream()
1199 self.call_later(self._deferred_service_reset)
1200 else:
1201 reset_services()
1203 def _deferred_service_reset(self) -> None:
1204 """Reset services once workers have drained."""
1205 if self.workers:
1206 self.call_later(self._deferred_service_reset)
1207 return
1208 reset_services()
1210 async def action_toggle_markdown(self) -> None:
1211 """Toggle between Markdown and plain-text rendering for chat responses."""
1212 cfg.markdown_rendering = not cfg.markdown_rendering
1213 use_md = cfg.markdown_rendering
1214 chat_log = self._chat_log
1215 for widget in chat_log.query(AssistantMessage):
1216 await widget.rebuild_content_widget(use_md)
1217 label = "Markdown" if use_md else "Plain text"
1218 self.notify(msg.CHAT_RENDERING.format(label=label))
    def _run_sync(self) -> None:
        """Enqueue a document sync in the task bar.

        Guards against concurrent syncs via the ``_sync_active`` flag, which
        is cleared in the worker's ``finally`` so failure or cancel cannot
        wedge future syncs.
        """
        if self._sync_active:
            self.notify(msg.SYNC_ALREADY_ACTIVE, severity="warning")
            return
        from lilbee.cli.tui.task_queue import TaskType

        self._sync_active = True
        # Clear the pending hint so the bar shows live sync progress
        # instead of the stale "N docs to sync" line.
        self._task_bar.clear_pending_sync()

        def _target(reporter: ProgressReporter) -> None:
            # Runs on the task-bar worker thread.
            try:
                self._do_sync(reporter)
            finally:
                self._sync_active = False
                # Re-detect after every sync attempt: success drives the
                # count to 0, failure or cancel leaves the still-pending
                # files counted so the hint reappears.
                self._task_bar.start_detect_pending()

        self._task_bar.start_task("Sync documents", TaskType.SYNC, _target, indeterminate=True)
    def _do_sync(self, reporter: ProgressReporter) -> None:
        """Sync body. Runs on worker thread.

        Raises RuntimeError so the task bar reports failure: once for a
        cancelled sync (resumable), once when any files failed. Skipped
        files are only a warning notification.
        """
        from lilbee.data.ingest import sync

        reporter.update(0, msg.SYNC_STATUS_SYNCING, indeterminate=True)
        on_progress = build_sync_progress_callback(reporter)
        try:
            result = asyncio_loop.run(sync(quiet=True, on_progress=on_progress))
        except asyncio.CancelledError as exc:
            # Surface cancellation as a task failure with a resume hint.
            raise RuntimeError(msg.SYNC_CANCELLED_RESUME) from exc
        if result.failed:
            raise RuntimeError(msg.SYNC_FAILED_FILES.format(files=", ".join(result.failed)))
        if result.skipped:
            # Worker thread: notifications must hop to the main thread.
            call_from_thread(
                self,
                self.notify,
                msg.sync_skipped_message(", ".join(result.skipped)),
                severity="warning",
            )
1264 def action_focus_commands(self) -> None:
1265 """Focus chat input and pre-fill with '/' for command entry."""
1266 # Route through the helper so can_focus is re-enabled when this
1267 # action fires from NORMAL mode; bare ``inp.focus()`` would
1268 # silently no-op while the input is intentionally unfocusable.
1269 self._enter_insert_mode()
1270 inp = self._chat_input
1271 if not inp.value.startswith("/"):
1272 inp.value = "/"
1273 inp.action_end()
1275 def action_focus_model_bar(self) -> None:
1276 """Focus the chat-model picker button in the model bar (normal mode only)."""
1277 if self._insert_mode:
1278 raise SkipAction()
1279 with contextlib.suppress(NoMatches):
1280 self.query_one("#chat-model-button", ModelPickerButton).focus()
1282 def action_toggle_chat_mode(self) -> None:
1283 """F3: flip between Search and Chat mode."""
1284 try:
1285 toggle = self.query_one(ChatModeToggle)
1286 except NoMatches:
1287 return
1288 if not toggle.toggle():
1289 return
1290 label = (
1291 msg.CHAT_MODE_SEARCH_LABEL
1292 if cfg.chat_mode == ChatMode.SEARCH.value
1293 else msg.CHAT_MODE_CHAT_LABEL
1294 )
1295 self.notify(msg.CHAT_MODE_SET.format(label=label))
1297 def action_cycle_scope(self) -> None:
1298 """``s``: cycle the scope chip when it is currently visible."""
1299 from lilbee.cli.tui.widgets.scope_chip import ScopeChip
1301 try:
1302 chip = self.query_one("#scope-chip", ScopeChip)
1303 except NoMatches:
1304 return
1305 if chip.has_class("-hidden"):
1306 return
1307 chip.cycle_scope()
1309 def action_complete(self) -> None:
1310 """Tab: cycle autocomplete, insert a literal tab, or advance focus.
1312 - Insert mode + chat input focused + completion overlay open:
1313 cycle the next completion candidate.
1314 - Insert mode + chat input focused + no completion: insert
1315 ``\\t`` so users can type tab characters directly.
1316 - Normal mode or focus elsewhere: advance through the focus
1317 chain so Tab still walks every focusable widget.
1318 """
1319 inp = self._chat_input
1320 if not self._insert_mode or not inp.has_focus:
1321 self.screen.focus_next()
1322 return
1323 if self._cycle_completion_forward(inp):
1324 return
1325 inp.insert("\t")
1327 def action_complete_next(self) -> None:
1328 """Ctrl+N: highlight-only nav when open, else show + insert (vim ``<C-n>``)."""
1329 inp = self._chat_input
1330 if not inp.has_focus:
1331 return
1332 overlay = self._completion_overlay
1333 if overlay.is_visible:
1334 overlay.cycle_next()
1335 return
1336 self._cycle_completion_forward(inp)
    def _cycle_completion_forward(self, inp: ChatInput) -> bool:
        """Show or cycle forward through autocomplete; returns True if it acted.

        The ``_completing`` flag brackets every programmatic value write so
        ``_on_chat_input_changed`` does not treat it as user typing and
        re-drive the overlay.
        """
        overlay = self._completion_overlay

        if overlay.is_visible:
            selection = overlay.cycle_next()
            if selection:
                # A space means we're completing an argument: keep the
                # command word and replace only the argument part.
                cmd_prefix = inp.value.split()[0] + " " if " " in inp.value else ""
                self._completing = True
                inp.value = cmd_prefix + selection
                self._completing = False
                inp.action_end()
            return True

        options = get_completions(inp.value)
        if options:
            overlay.show_completions(options)
            first = overlay.get_current()
            self._completing = True
            if first and " " in inp.value:
                cmd_prefix = inp.value.split()[0] + " "
                inp.value = cmd_prefix + first
                inp.action_end()
            elif first:
                inp.value = first
                inp.action_end()
            self._completing = False
            return True

        return False
    def action_complete_prev(self) -> None:
        """Highlight-only nav when open, else show + insert (mirror of complete_next)."""
        inp = self._chat_input
        if not inp.has_focus:
            return
        overlay = self._completion_overlay
        if overlay.is_visible:
            overlay.cycle_prev()
            return

        options = get_completions(inp.value)
        if options:
            overlay.show_completions(options)
            # NOTE(review): despite the name, this takes whatever entry
            # show_completions leaves current — not necessarily the last
            # one. Confirm the overlay's initial selection semantics.
            last = overlay.get_current()
            # Bracket the programmatic write so the Changed handler does
            # not re-drive the overlay.
            self._completing = True
            if last and " " in inp.value:
                cmd_prefix = inp.value.split()[0] + " "
                inp.value = cmd_prefix + last
                inp.action_end()
            elif last:
                inp.value = last
                inp.action_end()
            self._completing = False
    def action_history_prev(self) -> None:
        """Up arrow: cycle the dropdown if visible, else recall previous history entry.

        ``_history_index == -1`` is the sentinel for "not browsing history";
        the first Up jumps to the newest entry, further Ups walk backwards,
        and the oldest entry pins (plain return, no SkipAction).
        """
        if not self._insert_mode:
            raise SkipAction()
        inp = self._chat_input
        if not inp.has_focus:
            raise SkipAction()
        # When the completion dropdown is up, Up navigates the dropdown
        # (vim/Emacs-style) rather than recalling history.
        overlay = self._completion_overlay
        if overlay.is_visible:
            overlay.cycle_prev()
            return
        if not self._input_history:
            raise SkipAction()
        if self._history_index == -1:
            self._history_index = len(self._input_history) - 1
        elif self._history_index > 0:
            self._history_index -= 1
        else:
            return
        inp.value = self._input_history[self._history_index]
        inp.action_end()
    def action_history_next(self) -> None:
        """Down arrow: cycle the dropdown if visible, else recall next history entry.

        Walking past the newest entry leaves history browsing: the index
        resets to the -1 sentinel and the input is cleared.
        """
        if not self._insert_mode:
            raise SkipAction()
        inp = self._chat_input
        if not inp.has_focus:
            raise SkipAction()
        # When the completion dropdown is up, Down navigates the dropdown.
        overlay = self._completion_overlay
        if overlay.is_visible:
            overlay.cycle_next()
            return
        if self._history_index == -1:
            raise SkipAction()
        if self._history_index < len(self._input_history) - 1:
            self._history_index += 1
            inp.value = self._input_history[self._history_index]
            inp.action_end()
        else:
            self._history_index = -1
            inp.value = ""
1439 @on(ChatInput.Changed, "#chat-input")
1440 def _on_chat_input_changed(self, event: ChatInput.Changed) -> None:
1441 """Refresh arg-hint and auto-show or hide the completion dropdown."""
1442 if self._completing:
1443 # Tab-completion is mid-flight; the cycler manages overlay state.
1444 self._refresh_arg_hint()
1445 return
1446 self._refresh_completion_overlay()
1447 self._refresh_arg_hint()
1449 def _refresh_completion_overlay(self) -> None:
1450 """Auto-show the dropdown for COMMAND discovery only; arg completions stay on Tab."""
1451 overlay = self._completion_overlay
1452 text = self._chat_input.value
1453 # Once the user has typed a space, they are in arg-completion mode.
1454 # Leave any Tab-triggered overlay alone and don't auto-pop one.
1455 if " " in text:
1456 return
1457 options = get_completions(text)
1458 if options:
1459 overlay.show_completions(options)
1460 elif overlay.is_visible:
1461 overlay.hide()
1463 def _refresh_arg_hint(self) -> None:
1464 """Push the current input value into the ArgHintLine."""
1465 self._arg_hint.update_for_input(self._chat_input.value)
1467 def refresh_model_bar(self) -> None:
1468 """Re-scan installed models and refresh the dropdowns."""
1469 self.query_one("#model-bar", ModelBar).refresh_models()
1471 def action_vim_scroll_down(self) -> None:
1472 """Vim j: scroll down in normal mode."""
1473 if self._insert_mode:
1474 raise SkipAction()
1475 self._chat_log.scroll_down()
1477 def action_vim_scroll_up(self) -> None:
1478 """Vim k: scroll up in normal mode."""
1479 if self._insert_mode:
1480 raise SkipAction()
1481 self._chat_log.scroll_up()
1483 def action_vim_scroll_home(self) -> None:
1484 """Vim g: scroll to top in normal mode."""
1485 if self._insert_mode:
1486 raise SkipAction()
1487 self._chat_log.scroll_home()
1489 def action_vim_scroll_end(self) -> None:
1490 """Vim G: scroll to bottom in normal mode."""
1491 if self._insert_mode:
1492 raise SkipAction()
1493 self._chat_log.scroll_end()
1495 def action_half_page_down(self) -> None:
1496 """Ctrl-D: half-page down (vim style)."""
1497 log_widget = self._chat_log
1498 half = max(1, log_widget.size.height // 2)
1499 log_widget.scroll_relative(y=half)
1501 def action_half_page_up(self) -> None:
1502 """Ctrl-U: half-page up (vim style)."""
1503 log_widget = self._chat_log
1504 half = max(1, log_widget.size.height // 2)
1505 log_widget.scroll_relative(y=-half)