Coverage for src/lilbee/cli/tui/widgets/message.py: 100%

135 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-05-15 20:55 +0000

1"""Chat message widgets: user and assistant bubbles.""" 

2 

3from __future__ import annotations 

4 

5import time 

6from pathlib import Path 

7from typing import ClassVar 

8 

9from textual.app import ComposeResult 

10from textual.containers import Vertical 

11from textual.content import Content 

12from textual.widgets import Collapsible, Markdown, Static 

13 

14from lilbee.cli.tui import messages as msg 

15from lilbee.cli.tui.pill import pill 

16from lilbee.cli.tui.widgets.thinking_header import ThinkingHeader 

17from lilbee.core.config import cfg 

18 

# Minimum interval (seconds) between markdown widget updates during streaming
_MD_UPDATE_INTERVAL = 0.1

# Speaker labels rendered above each bubble (Textual content markup).
_SPEAKER_YOU = "[bold $primary]you[/]"
_SPEAKER_LILBEE = "[bold $success]lilbee[/]"

# CSS classes for the reasoning Collapsible; the "-streaming" modifier is
# removed when the response finishes.
_REASONING_BLOCK_CLASS = "reasoning-block"
_REASONING_STREAMING_CLASS = "-streaming"

# Stylesheet shared by both message widgets, read once at import time.
_CSS_FILE = Path(__file__).parent / "message.tcss"
_MESSAGE_CSS = _CSS_FILE.read_text(encoding="utf-8")

30 

31 

class UserMessage(Vertical):
    """Chat-log bubble showing a question typed by the user."""

    DEFAULT_CSS: ClassVar[str] = _MESSAGE_CSS

    def __init__(self, text: str) -> None:
        super().__init__(classes="user-message")
        # Verbatim question text, rendered by compose().
        self._question = text

    def compose(self) -> ComposeResult:
        # Speaker label row first, then the message body beneath it.
        label = Static(_SPEAKER_YOU, classes="speaker-label")
        body = Static(self._question, classes="message-content")
        yield label
        yield body

44 

45 

class AssistantMessage(Vertical):
    """An assistant's response with streaming markdown, reasoning, and citations.

    Streamed tokens accumulate in lists and are flushed into the widgets at
    most every ``_MD_UPDATE_INTERVAL`` seconds; ``finish`` performs a final,
    non-debounced flush so no trailing tokens are lost.
    """

    DEFAULT_CSS: ClassVar[str] = _MESSAGE_CSS

    def __init__(self) -> None:
        super().__init__(classes="assistant-message")
        # Streamed token fragments; joined on every flush.
        self._reasoning_parts: list[str] = []
        self._content_parts: list[str] = []
        self._finished = False
        # Widgets are created lazily: content/citation in compose(), the
        # thinking header in on_mount(), the reasoning fold on the first
        # reasoning token.
        self._content_widget: Markdown | Static | None = None
        self._reasoning_widget: Collapsible | None = None
        self._reasoning_static: Static | None = None
        self._citation_widget: Static | None = None
        self._thinking_header: ThinkingHeader | None = None
        # time.monotonic() of the last debounced widget update.
        self._last_md_update: float = 0.0
        self._last_reasoning_update: float = 0.0
        # Rendering mode snapshotted from config at construction time.
        self._use_markdown: bool = cfg.markdown_rendering

    def compose(self) -> ComposeResult:
        yield Static(_SPEAKER_LILBEE, classes="speaker-label")
        self._content_widget = self._build_content_widget()
        yield self._content_widget
        self._citation_widget = Static("", classes="source-citation")
        yield self._citation_widget

    def on_mount(self) -> None:
        """Mount the thinking header above the content widget.

        ``compose`` populates ``_content_widget`` before this hook runs.
        """
        if self._content_widget is None:
            return
        header = ThinkingHeader()
        self._thinking_header = header
        self.mount(header, before=self._content_widget)

    def _build_content_widget(self) -> Markdown | Static:
        """Create the content widget based on the current rendering mode."""
        if self._use_markdown:
            return Markdown("", classes="response-md")
        return Static("", classes="response-md")

    @property
    def use_markdown(self) -> bool:
        """Whether this message is using Markdown rendering."""
        return self._use_markdown

    async def rebuild_content_widget(self, use_markdown: bool) -> None:
        """Replace the content widget with a different rendering mode.

        Args:
            use_markdown: ``True`` for a ``Markdown`` widget, ``False`` for a
                plain ``Static``.

        The new widget is mounted directly after the old one before the old
        one is removed, so it keeps the same position in the message.
        """
        if self._content_widget is None:
            return
        self._use_markdown = use_markdown
        old = self._content_widget
        new_widget = self._build_content_widget()
        text = "".join(self._content_parts)
        new_widget.update(text)
        await self.mount(new_widget, after=old)
        self._content_widget = new_widget
        await old.remove()

    def append_reasoning(self, text: str) -> None:
        """Append a reasoning token; debounced at ``_MD_UPDATE_INTERVAL``."""
        first_token = not self._reasoning_parts
        self._reasoning_parts.append(text)
        # Lazily mount the reasoning fold the first time reasoning arrives.
        if first_token and self._reasoning_widget is None:
            self._mount_reasoning_collapsible()
        now = time.monotonic()
        ready = now - self._last_reasoning_update >= _MD_UPDATE_INTERVAL
        if self._reasoning_static is not None and ready:
            self._last_reasoning_update = now
            self._reasoning_static.update("".join(self._reasoning_parts))

    def append_content(self, text: str) -> None:
        """Append response content token (debounced markdown updates)."""
        first_token = not self._content_parts
        self._content_parts.append(text)
        if first_token and not self._reasoning_parts:
            # No reasoning ever arrived; drop the standalone header.
            self._dismiss_thinking_header()
        now = time.monotonic()
        if self._content_widget is not None and now - self._last_md_update >= _MD_UPDATE_INTERVAL:
            self._last_md_update = now
            self._content_widget.update("".join(self._content_parts))
            self.refresh()

    def finish(self, sources: list[str] | None = None) -> None:
        """Mark response as complete and show citations.

        Args:
            sources: Source file paths rendered as citation pills; when empty
                or ``None`` the citation row is hidden instead.
        """
        self._finished = True
        # Always retire the standalone header on finish; the reasoning fold
        # (if mounted) carries the post-stream title.
        self._dismiss_thinking_header()
        # Final flush: the debounce may have skipped the last tokens.
        if self._content_widget is not None and self._content_parts:
            self._content_widget.update("".join(self._content_parts))
            self.refresh()
        if self._reasoning_widget is not None and self._reasoning_parts:
            if self._reasoning_static is not None:
                self._reasoning_static.update("".join(self._reasoning_parts))
            # Whitespace-separated word count shown in the fold's title.
            token_count = len("".join(self._reasoning_parts).split())
            self._reasoning_widget.remove_class(_REASONING_STREAMING_CLASS)
            self._reasoning_widget.title = msg.CHAT_REASONING_FINISHED.format(tokens=token_count)
            self._reasoning_widget.collapsed = True

        if sources and self._citation_widget is not None:
            self._citation_widget.update(_build_citation_content(sources))
        elif self._citation_widget is not None:
            self._citation_widget.display = False

    def _mount_reasoning_collapsible(self) -> None:
        """Mount the reasoning Collapsible with the streaming-state class.

        Called from ``append_reasoning`` on the first reasoning token, after
        the message itself is mounted. The Collapsible slots in beneath the
        ``ThinkingHeader`` so the animator continues to drive the visual
        weight while the toggle row is hidden by the ``-streaming`` rule.
        """
        classes = f"{_REASONING_BLOCK_CLASS} {_REASONING_STREAMING_CLASS}"
        self._reasoning_static = Static("", classes="reasoning-text")
        collapsible = Collapsible(
            self._reasoning_static,
            title=msg.CHAT_REASONING_FINISHED.format(tokens=0),
            collapsed=False,
            classes=classes,
        )
        self._reasoning_widget = collapsible
        header = self._thinking_header
        # Prefer anchoring directly under the live header; fall back to the
        # slot just above the content widget if the header is already gone.
        if header is not None and header.is_mounted:
            self.mount(collapsible, after=header)
            return
        content = self._content_widget
        if content is not None:
            self.mount(collapsible, before=content)

    def _dismiss_thinking_header(self) -> None:
        """Stop the animator and remove the standalone header from the DOM."""
        header = self._thinking_header
        if header is None:
            return
        header.stop()
        if header.is_mounted:
            header.remove()
        self._thinking_header = None

188 

189 

def _build_citation_content(sources: list[str]) -> Content:
    """Assemble the 'sources: pill pill pill' content line from source paths."""
    pieces: list[Content] = [Content.styled(msg.CHAT_SOURCES_LABEL, "$text-muted")]
    for source_path in sources:
        # Each pill shows only the file name, separated by a single space.
        pieces.extend(
            (Content(" "), pill(Path(source_path).name, "$surface-lighten-2", "$text"))
        )
    return Content.assemble(*pieces)