Coverage for src/lilbee/providers/llama_cpp/abort_signal.py: 100%
12 statements
« prev ^ index » next — coverage.py v7.13.4, created at 2026-05-15 20:55 +0000
"""Process-wide abort flag wired into llama_cpp.Llama's abort_callback."""

from __future__ import annotations

import threading
from typing import Any

# Single process-wide flag shared by every function in this module: set by
# request_abort(), cleared by clear_abort(), read by is_abort_set() and
# abort_callback(). threading.Event makes set/clear/is_set thread-safe.
_abort = threading.Event()
def request_abort() -> None:
    """Raise the process-wide abort flag.

    ggml polls this flag (via abort_callback) every N tokens, so the
    running inference stops shortly after this is called.
    """
    _abort.set()
def clear_abort() -> None:
    """Lower the abort flag so subsequent inferences run to completion."""
    _abort.clear()
def is_abort_set() -> bool:
    """Report whether an abort has been requested and not yet cleared.

    Returns:
        True once request_abort() has fired, until clear_abort() resets it.
    """
    return _abort.is_set()
def abort_callback(_user_data: Any = None) -> bool:
    """Callback handed to ggml as its abort hook.

    Args:
        _user_data: Opaque pointer supplied by ggml; ignored here.

    Returns:
        True iff request_abort() has fired (and clear_abort() has not run),
        which tells ggml to stop generating.
    """
    return _abort.is_set()