77 changes: 77 additions & 0 deletions examples/01_standalone_sdk/15_condense_command.py
@@ -0,0 +1,77 @@
"""
Demonstrate explicit `/condense` command and skill-triggered condensation.

This example uses Gemini 2.5 Pro for both the agent and the condenser. Set
GEMINI_API_KEY in your environment. The model id is `gemini-2.5-pro`.

Usage:
GEMINI_API_KEY=... uv run python examples/01_standalone_sdk/15_condense_command.py
"""

import os

from pydantic import SecretStr

from openhands.sdk import (
LLM,
Agent,
Conversation,
Event,
LLMConvertibleEvent,
get_logger,
)
from openhands.sdk.context import AgentContext, KeywordTrigger, Skill
from openhands.sdk.context.condenser import LLMSummarizingCondenser
from openhands.sdk.tool import Tool
from openhands.tools.task_tracker import TaskTrackerTool


logger = get_logger(__name__)


def make_llm(usage_id: str) -> LLM:
api_key = os.getenv("GEMINI_API_KEY")
assert api_key, "Set GEMINI_API_KEY"
return LLM(model="gemini-2.5-pro", api_key=SecretStr(api_key), usage_id=usage_id)


def main():
# Minimal tools (no terminal needed)
tools = [Tool(name=TaskTrackerTool.name)]
llm_agent = make_llm("agent")
condenser = LLMSummarizingCondenser(
llm=make_llm("condenser"), max_size=10, keep_first=2
)

# Provide a simple knowledge skill that can trigger condensation by name
condense_skill = Skill(
name="condense",
content="When activated, the conversation will be condensed.",
trigger=KeywordTrigger(keywords=["/condense"]),
)

ctx = AgentContext(skills=[condense_skill])

agent = Agent(llm=llm_agent, tools=tools, condenser=condenser, agent_context=ctx)

llm_messages = []

def cb(e: Event):
if isinstance(e, LLMConvertibleEvent):
llm_messages.append(e.to_llm_message())

convo = Conversation(agent=agent, callbacks=[cb], workspace=".")

convo.send_message("Start the conversation with some context.")
convo.run()

# Now request condensation explicitly
print("Requesting condensation via /condense...")
convo.send_message("/condense")
convo.run()

print("Finished. Total LLM messages collected:", len(llm_messages))


if __name__ == "__main__":
main()
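To confirm that condensation actually happened, the callback can also watch for the resulting event. A minimal sketch, assuming the `Condensation` event type exported from `openhands.sdk.event` (as imported in the tests below):

    from openhands.sdk.event import Condensation

    def cb(e: Event):
        if isinstance(e, Condensation):
            # Emitted once the condenser has summarized older history
            print("Condensation event received; history was summarized.")
        if isinstance(e, LLMConvertibleEvent):
            llm_messages.append(e.to_llm_message())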
@@ -23,6 +23,7 @@
    PauseEvent,
    UserRejectObservation,
)
from openhands.sdk.event.condenser import CondensationRequest
from openhands.sdk.event.conversation_error import ConversationErrorEvent
from openhands.sdk.llm import LLM, Message, TextContent
from openhands.sdk.llm.llm_registry import LLMRegistry
@@ -228,6 +229,15 @@ def send_message(self, message: str | Message) -> None:
            extended_content.append(content)
        self._state.activated_knowledge_skills.extend(activated_skill_names)

        # Special command via skill: if a 'condense' knowledge skill is
        # activated or the user explicitly types '/condense', emit a
        # CondensationRequest instead of recording a user MessageEvent.
        if "condense" in activated_skill_names or any(
            isinstance(c, TextContent) and "/condense" in c.text.lower()
            for c in message.content
        ):
            self._on_event(CondensationRequest())
            return

        user_msg_event = MessageEvent(
            source="user",
            llm_message=message,
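Because of the early `return`, a `/condense` message is consumed at the conversation layer: it emits a `CondensationRequest` and the literal text never reaches the LLM history. A caller can observe this directly (as the tests below do):

    convo.send_message("/condense")
    assert isinstance(convo.state.events[-1], CondensationRequest)  # no user MessageEvent recorded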
90 changes: 90 additions & 0 deletions tests/sdk/conversation/local/test_condense_command.py
@@ -0,0 +1,90 @@
from pydantic import PrivateAttr

from openhands.sdk.agent import Agent
from openhands.sdk.context.condenser import LLMSummarizingCondenser
from openhands.sdk.conversation import Conversation
from openhands.sdk.event import Condensation, CondensationRequest
from openhands.sdk.llm import LLM, Message


class DummyLLM(LLM):
    _calls: list[str] = PrivateAttr(default_factory=list)

    def __init__(self, *, model: str = "test-model"):
        super().__init__(model=model, usage_id="test-llm")

    def completion(self, *, messages, tools=None, **kwargs):  # type: ignore[override]
        return self._basic_response()

    def responses(self, *, messages, tools=None, **kwargs):  # type: ignore[override]
        return self._basic_response()

    def _basic_response(self):
        from unittest.mock import MagicMock

        from litellm.types.utils import ModelResponse

        from openhands.sdk.llm import LLMResponse
        from openhands.sdk.llm.utils.metrics import MetricsSnapshot, TokenUsage

        return LLMResponse(
            message=Message(role="assistant", content=[]),
            metrics=MetricsSnapshot(
                model_name="test",
                accumulated_cost=0.0,
                max_budget_per_task=0.0,
                accumulated_token_usage=TokenUsage(model="test"),
            ),
            raw_response=MagicMock(spec=ModelResponse, id="resp-1"),
        )


def test_send_message_with_slash_condense_emits_request():
    llm = DummyLLM()
    agent = Agent(llm=llm, tools=[])
    convo = Conversation(agent=agent)

    convo.send_message("/condense")

    assert any(isinstance(e, CondensationRequest) for e in convo.state.events)
    assert isinstance(convo.state.events[-1], CondensationRequest)


def test_condense_request_triggers_condenser_on_next_step():
    llm = DummyLLM()
    # Configure the condenser to satisfy the validator keep_first < max_size // 2 - 1:
    # with max_size=10 and keep_first=2, we have 2 < 4, so condensation is allowed.
    condenser = LLMSummarizingCondenser(
        llm=llm.model_copy(update={"usage_id": "cond"}), max_size=10, keep_first=2
    )
    agent = Agent(llm=llm, tools=[], condenser=condenser)
    convo = Conversation(agent=agent)

    for i in range(3):
        convo.send_message(f"msg {i}")
        convo.run()

    convo.send_message("/condense please")
    convo.run()

    assert any(isinstance(e, Condensation) for e in convo.state.events)


def test_condense_skill_trigger_name():
    from openhands.sdk.context import AgentContext, KeywordTrigger, Skill

    llm = DummyLLM()
    skill = Skill(
        name="condense",
        content="Use condenser now",
        trigger=KeywordTrigger(keywords=["/condense"]),
    )
    agent_context = AgentContext(skills=[skill])

    agent = Agent(llm=llm, tools=[], agent_context=agent_context)
    convo = Conversation(agent=agent)

    convo.send_message("Could you /condense the context?")

    assert isinstance(convo.state.events[-1], CondensationRequest)
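Assuming the repository's uv-based workflow (as in the example's Usage line above), the new tests can presumably be run with:

    uv run pytest tests/sdk/conversation/local/test_condense_command.py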