Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from instructor import Instructor, AsyncInstructor
from jinja2 import Template
from textwrap import dedent
from typing import Type, TypeVar
from typing import Type, TypeVar, Any

T = TypeVar("T", bound=BaseModel)

Expand Down Expand Up @@ -115,35 +115,51 @@ def set_client(self, client: Instructor):
self._client = client

def predict(
    self,
    query: str,
    model: str,
    response_model: Type[T],
    client: Instructor,
    hooks: Any | None = None,
):
    """Run a synchronous structured-output completion for *query*.

    Builds a system message from this prompt object and a user message from
    the query, then delegates to ``client.create`` so the response is parsed
    into ``response_model``.

    Args:
        query: Raw user query; rendered via ``self.get_user_query``.
        model: Model identifier forwarded to the client.
        response_model: Pydantic model class the response is validated into.
        client: An instructor-patched client exposing ``create``.
        hooks: Optional instructor hooks object; only forwarded when not
            ``None`` so older client versions without hook support keep
            working unchanged.

    Returns:
        Whatever ``client.create`` returns — presumably an instance of
        ``response_model`` (NOTE(review): exact return type depends on the
        instructor client; confirm against its docs).
    """
    system_message = self.to_system_messages()
    user_query = self.get_user_query(query)
    # Assemble kwargs in a dict so optional arguments (hooks) can be added
    # conditionally instead of passing hooks=None to clients that may not
    # accept the keyword.
    kwargs = {
        "model": model,
        "response_model": response_model,
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_query},
        ],
        # Labels are injected as validation context so the response model's
        # validators can check the output against the allowed label set.
        "validation_context": {
            "labels": self.get_labels(),
        },
    }
    if hooks is not None:
        kwargs["hooks"] = hooks
    return client.create(**kwargs)

async def apredict(
    self,
    query: str,
    model: str,
    response_model: Type[T],
    client: AsyncInstructor,
    hooks: Any | None = None,
):
    """Async counterpart of ``predict``.

    Builds the same system/user message pair and awaits
    ``client.create`` so the response is parsed into ``response_model``.

    Args:
        query: Raw user query; rendered via ``self.get_user_query``.
        model: Model identifier forwarded to the client.
        response_model: Pydantic model class the response is validated into.
        client: An instructor-patched async client exposing ``create``.
        hooks: Optional instructor hooks object; only forwarded when not
            ``None`` for backward compatibility with clients that do not
            accept the keyword.

    Returns:
        The awaited result of ``client.create`` — presumably an instance of
        ``response_model`` (NOTE(review): confirm against the instructor
        client's documentation).
    """
    system_message = self.to_system_messages()
    user_query = self.get_user_query(query)
    # Conditional kwargs assembly mirrors predict(): hooks is attached only
    # when provided.
    kwargs = {
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_query},
        ],
        "model": model,
        "response_model": response_model,
        # Labels go into validation context for the model's validators.
        "validation_context": {
            "labels": self.get_labels(),
        },
    }
    if hooks is not None:
        kwargs["hooks"] = hooks
    return await client.create(**kwargs)
80 changes: 50 additions & 30 deletions latest/case_study/core/summarization.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ class ConversationSummary(BaseModel):
async def conversation_summary_v1(
client, # instructor-patched client
messages: List[Dict[str, Any]],
hooks: Any | None = None,
) -> ConversationSummary:
"""
Generate a concise summary focused on key topics and searchable terms.
Expand Down Expand Up @@ -61,19 +62,23 @@ async def conversation_summary_v1(
Generate a concise, searchable summary of this conversation.
"""

response = await client.chat.completions.create(
response_model=ConversationSummary,
messages=[{"role": "user", "content": prompt}],
context={"messages": messages},
max_retries=3,
)
kwargs = {
"response_model": ConversationSummary,
"messages": [{"role": "user", "content": prompt}],
"context": {"messages": messages},
"max_retries": 3,
}
if hooks is not None:
kwargs["hooks"] = hooks
response = await client.chat.completions.create(**kwargs)

return response


async def conversation_summary_v2(
client, # instructor-patched client
messages: List[Dict[str, Any]],
hooks: Any | None = None,
) -> ConversationSummary:
"""
Generate a comprehensive summary that captures conversation patterns, themes, and user interaction dynamics.
Expand Down Expand Up @@ -278,25 +283,29 @@ async def conversation_summary_v2(
{% endfor %}
"""

response = await client.chat.completions.create(
response_model=ConversationSummary,
messages=[
kwargs = {
"response_model": ConversationSummary,
"messages": [
{
"role": "system",
"content": "You are an expert at analyzing and categorizing human-AI conversations, focusing on patterns, themes, interaction characteristics, and user experience dynamics across all types of conversations.",
},
{"role": "user", "content": prompt},
],
context={"messages": messages},
max_retries=3,
)
"context": {"messages": messages},
"max_retries": 3,
}
if hooks is not None:
kwargs["hooks"] = hooks
response = await client.chat.completions.create(**kwargs)

return response


async def conversation_summary_v3(
client, # instructor-patched client
messages: List[Dict[str, Any]],
hooks: Any | None = None,
) -> ConversationSummary:
"""
Generate a concise pattern-focused summary that bridges content and patterns.
Expand Down Expand Up @@ -361,25 +370,29 @@ async def conversation_summary_v3(
Generate a concise, pattern-aware summary that balances content and conversation dynamics.
"""

response = await client.chat.completions.create(
response_model=ConversationSummary,
messages=[
kwargs = {
"response_model": ConversationSummary,
"messages": [
{
"role": "system",
"content": "You are an expert at creating concise summaries that capture both what was discussed and how the conversation unfolded. Balance pattern recognition with content summary.",
},
{"role": "user", "content": prompt},
],
context={"messages": messages},
max_retries=3,
)
"context": {"messages": messages},
"max_retries": 3,
}
if hooks is not None:
kwargs["hooks"] = hooks
response = await client.chat.completions.create(**kwargs)

return response


async def conversation_summary_v4(
client, # instructor-patched client
messages: List[Dict[str, Any]],
hooks: Any | None = None,
) -> ConversationSummary:
"""
Generate a pattern-focused summary optimized for v2-style queries.
Expand Down Expand Up @@ -461,25 +474,29 @@ async def conversation_summary_v4(
Generate a pattern-focused summary that would be discoverable by researchers looking for specific conversation types and interaction patterns.
"""

response = await client.chat.completions.create(
response_model=ConversationSummary,
messages=[
kwargs = {
"response_model": ConversationSummary,
"messages": [
{
"role": "system",
"content": "You are an expert at categorizing conversations by their patterns, types, and interaction dynamics. Focus on creating summaries that match how researchers search for conversation patterns rather than specific content.",
},
{"role": "user", "content": prompt},
],
context={"messages": messages},
max_retries=3,
)
"context": {"messages": messages},
"max_retries": 3,
}
if hooks is not None:
kwargs["hooks"] = hooks
response = await client.chat.completions.create(**kwargs)

return response


async def conversation_summary_v5(
client, # instructor-patched client
messages: List[Dict[str, Any]],
hooks: Any | None = None,
) -> ConversationSummary:
"""
Generate summaries optimized for AI agent failure analysis and improvement identification.
Expand Down Expand Up @@ -539,17 +556,20 @@ async def conversation_summary_v5(
Generate a balanced summary optimized for both pattern and content queries.
"""

response = await client.chat.completions.create(
response_model=ConversationSummary,
messages=[
kwargs = {
"response_model": ConversationSummary,
"messages": [
{
"role": "system",
"content": "You are an expert at analyzing AI conversations for system improvement. Focus on identifying failure patterns, root causes, and specific opportunities for enhancement. Your summaries should enable data-driven decisions about where to invest development effort.",
},
{"role": "user", "content": prompt},
],
context={"messages": messages},
max_retries=3,
)
"context": {"messages": messages},
"max_retries": 3,
}
if hooks is not None:
kwargs["hooks"] = hooks
response = await client.chat.completions.create(**kwargs)

return response
Loading