Merged
11 changes: 7 additions & 4 deletions tests/integration/test_tool_call_loop_detection.py
@@ -40,11 +40,14 @@ async def test_client():
session={"default_interactive_mode": True},
)

# Ensure command_prefix is set before building the app
config_with_prefix = config.model_copy(update={"command_prefix": "!/"})

# Build test app using the modern async approach - this handles all initialization automatically
test_app = await build_test_app_async(config_with_prefix)
test_app = await build_test_app_async(config)

# The config is already available from the test_app
app_config = test_app.state.app_config
app_config.command_prefix = "!/"

test_app.state.app_config = app_config

with TestClient(test_app, headers={"Authorization": "Bearer test-key"}) as client:
yield client
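Review note: the removed lines derived a prefixed copy of the config before building the app (model_copy(update=...) is pydantic v2's API), while the added lines set the prefix on the config the built app already holds. A minimal sketch of the difference between the two styles, assuming the app config is a pydantic v2 BaseModel; the AppConfig model below is illustrative, not the project's real class:

from pydantic import BaseModel


class AppConfig(BaseModel):
    command_prefix: str = "/"


config = AppConfig()

# Removed approach: derive an updated copy up front; the original is untouched.
config_with_prefix = config.model_copy(update={"command_prefix": "!/"})
assert config_with_prefix.command_prefix == "!/"
assert config.command_prefix == "/"

# Kept approach: mutate the instance the already-built app references.
config.command_prefix = "!/"
assert config.command_prefix == "!/"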
90 changes: 90 additions & 0 deletions tests/unit/test_performance_tracker.py
@@ -1,6 +1,8 @@
 import logging
+from collections import deque
 
 import pytest
+from src import performance_tracker
 from src.performance_tracker import (
     PerformanceMetrics,
     track_phase,
@@ -33,6 +35,17 @@ def end_phase(self) -> None:
         self.ended += 1
 
 
+def _time_sequence(*values: float):
+    queue = deque(values)
+
+    def _next_time() -> float:
+        if not queue:
+            raise AssertionError("No more time values available")
+        return queue.popleft()
+
+    return _next_time
+
+
 def test_performance_metrics_phase_tracking_and_finalize(
     monkeypatch: pytest.MonkeyPatch,
 ) -> None:
@@ -107,3 +120,80 @@ def test_track_phase_context_manager_ensures_end_called_on_exception() -> None:
 
     assert dummy.started == ["phase-one"]
     assert dummy.ended == 1
+
+
+def test_track_phase_wraps_start_and_end(monkeypatch):
+    metrics = PerformanceMetrics()
+    events: list[tuple[str, str | None]] = []
+
+    def fake_start(phase_name: str) -> None:
+        events.append(("start", phase_name))
+
+    def fake_end() -> None:
+        events.append(("end", None))
+
+    monkeypatch.setattr(metrics, "start_phase", fake_start)
+    monkeypatch.setattr(metrics, "end_phase", fake_end)
+
+    with track_phase(metrics, "backend_call"):
+        events.append(("inside", None))
+
+    assert events == [
+        ("start", "backend_call"),
+        ("inside", None),
+        ("end", None),
+    ]
+
+
+def test_finalize_completes_active_phase(monkeypatch):
+    time_values = _time_sequence(10.0, 12.5, 15.0)
+    monkeypatch.setattr(performance_tracker.time, "time", time_values)
+
+    metrics = PerformanceMetrics(request_start=5.0)
+    metrics.start_phase("backend_call")
+
+    metrics.finalize()
+
+    assert metrics.backend_call_time == 2.5
+    assert metrics.total_time == 10.0
+
+
+def test_summary_helpers_include_defaults():
+    metrics = PerformanceMetrics()
+    metrics.total_time = 2.3456
+    metrics.command_processing_time = 0.123
+    metrics.response_processing_time = 0.456
+
+    summary_prefix = metrics._format_summary_prefix()
+    assert summary_prefix == [
+        "PERF_SUMMARY session=unknown",
+        "total=2.346s",
+        "backend=unknown",
+        "model=unknown",
+        "streaming=False",
+        "commands=False",
+    ]
+
+    timing_parts = metrics._format_timing_parts()
+    assert timing_parts == [
+        "cmd_proc=0.123s",
+        "resp_proc=0.456s",
+    ]
+
+
+def test_track_phase_ends_on_exception(monkeypatch):
+    metrics = PerformanceMetrics()
+    called: list[str] = []
+
+    def fake_end_phase() -> None:
+        called.append("end")
+
+    monkeypatch.setattr(metrics, "end_phase", fake_end_phase)
+
+    try:
+        with track_phase(metrics, "response_processing"):
+            raise RuntimeError("boom")
+    except RuntimeError:
+        pass
+
+    assert called == ["end"]
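Review note: the arithmetic in test_finalize_completes_active_phase, worked through under the assumption that finalize() first closes the active phase and then stamps the total duration (inferred from the asserted values, not from the implementation):

request_start = 5.0   # passed to PerformanceMetrics(request_start=5.0)
phase_start = 10.0    # 1st scripted time, consumed by start_phase("backend_call")
phase_end = 12.5      # 2nd scripted time, consumed when finalize() ends the phase
finalized_at = 15.0   # 3rd scripted time, consumed when finalize() totals up

backend_call_time = phase_end - phase_start  # 12.5 - 10.0 == 2.5
total_time = finalized_at - request_start    # 15.0 - 5.0 == 10.0
assert backend_call_time == 2.5
assert total_time == 10.0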