
test(examples): Add some examples leveraging pydantic-AI and other chatlas alternatives #66


Open · wants to merge 31 commits into main
Commits (31)
e785f63  Add some examples leveraging pydantic-AI (karangattu, Jun 3, 2025)
2f19d5a  linting files (karangattu, Jun 3, 2025)
02df719  remove requirements.txt (karangattu, Jun 3, 2025)
6205148  add more alternatives to chatlas (karangattu, Jun 18, 2025)
027acf5  remove comments in tool calling example (karangattu, Jun 18, 2025)
63e93bd  add extra packages (karangattu, Jun 18, 2025)
657015b  add missing structured_output for llm (karangattu, Jun 18, 2025)
7727633  Update pkg-py/tests/playwright/chat/langchain/structured_output/app.py (karangattu, Jun 30, 2025)
c3d5f53  Update pkg-py/tests/playwright/chat/llama-index/structured_output/app.py (karangattu, Jun 30, 2025)
d3bdfaa  Update pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py (karangattu, Jun 30, 2025)
cf765ed  Update pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py (karangattu, Jun 30, 2025)
25f82b5  Update pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py (karangattu, Jun 30, 2025)
7cad49a  remove data sci adventure example for pydantic (karangattu, Jul 1, 2025)
5e13404  update the example to make it into a shiny app (karangattu, Jul 1, 2025)
8128c03  use context for maintaining state (karangattu, Jul 1, 2025)
dee6dcf  maintain context across the conversation (karangattu, Jul 1, 2025)
2ac6b95  make it more langchain specific (karangattu, Jul 1, 2025)
b656214  preserve context within the chat conversation (karangattu, Jul 1, 2025)
6cf3494  allow streaming now in tool calling example (karangattu, Jul 1, 2025)
f9c38c8  Add session-based chat history retrieval function (karangattu, Jul 1, 2025)
b00f8a6  remove workout planner app example for pydantic ai (karangattu, Jul 1, 2025)
8ff507a  make the basic llama-index have streaming responses (karangattu, Jul 1, 2025)
61e0269  Allow maintaining context and streaming for tool calling example (karangattu, Jul 1, 2025)
527e985  remove multiple output structures and allow streaming (karangattu, Jul 1, 2025)
a3e7261  make the app even streamlined (karangattu, Jul 1, 2025)
7093e2b  use a streaming and stateful chatbot for pydantic ai basic app (karangattu, Jul 1, 2025)
622c82e  Update pkg-py/tests/playwright/chat/llama-index/basic/app.py (karangattu, Jul 15, 2025)
9877d83  Update pkg-py/tests/playwright/chat/llm_package/basic/app.py (karangattu, Jul 15, 2025)
c05c0c4  Update pkg-py/tests/playwright/chat/llama-index/structured_output/app.py (karangattu, Jul 15, 2025)
539e207  Refactor chat UI initialization and improve assistant prompts (karangattu, Jul 15, 2025)
d35d3a2  Reorder imports in app.py for consistency (karangattu, Jul 15, 2025)
75 changes: 75 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/basic/app.py
@@ -0,0 +1,75 @@
import os

from dotenv import load_dotenv
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
from shiny.express import ui

_ = load_dotenv()

model = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4.1-nano-2025-04-14",
)

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. You answer in a friendly and concise manner.",
        ),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

store = {}


def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chain_with_history = RunnableWithMessageHistory(
    prompt | model,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)

ui.page_opts(
    title="Shiny Chat with LangChain History",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
)
chat.ui(
    messages=[
        {
            "content": "Hello! I'm a chatbot that can remember our conversation. How can I help you today?",
            "role": "assistant",
        }
    ],
)


@chat.on_user_submit
async def handle_user_input(user_input: str):
    config = {"configurable": {"session_id": "shiny_session_1"}}
    response_stream = chain_with_history.astream(
        {"input": user_input},
        config=config,
    )

    async def stream_wrapper():
        async for chunk in response_stream:
            yield chunk.content

    await chat.append_message_stream(stream_wrapper())
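
A quick way to see why this example keeps context: get_session_history() hands RunnableWithMessageHistory the same InMemoryChatMessageHistory object on every turn for a given session id. Below is a minimal sketch of that behavior, not part of the PR; it repeats the store/getter from the app and uses only the langchain_core class imported above (the sample messages are made up for illustration).

# Sketch only: demonstrates that the per-session store returns one shared
# history object, which is how context accumulates across turns.
from langchain_core.chat_history import InMemoryChatMessageHistory

store: dict[str, InMemoryChatMessageHistory] = {}


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


history = get_session_history("shiny_session_1")
history.add_user_message("What is the capital of France?")
history.add_ai_message("Paris.")

# Same session id -> same history object, so earlier turns are still there.
assert get_session_history("shiny_session_1") is history
print([m.content for m in history.messages])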
58 changes: 58 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/structured_output/app.py
@@ -0,0 +1,58 @@
import os
from typing import Optional

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from shiny.express import ui

_ = load_dotenv()


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(
        description="How funny the joke is, from 1 to 10"
    )


_ = Joke.model_rebuild()

chat_client = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4o",
)

ui.page_opts(
    title="Hello LangChain Chat Model using structured output",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
)
chat.ui(
    messages=[
        {
            "role": "assistant",
            "content": """
Hello! I'm your joke assistant. I can tell you jokes and rate them.
Here are some examples of what you can ask me:
- <span class="suggestion"> Tell me a joke about pirates. </span>
- <span class="suggestion"> Can you tell me a funny joke about skeletons? </span>
- <span class="suggestion"> Give me a joke about cats. </span>
""",
        }
    ],
)


@chat.on_user_submit
async def handle_user_input(user_input: str):
    joke = chat_client.with_structured_output(Joke).invoke(user_input)
    joke_text = f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}"
    await chat.append_message(joke_text)
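
For readers unfamiliar with with_structured_output: the handler above receives a Joke instance rather than raw text, and the f-string only formats its fields. A minimal sketch of that last formatting step, not part of the PR, with the Joke built by hand instead of parsed from a model response (the joke values are invented for illustration).

# Sketch only: what the handler does once it has a Joke instance in hand.
from typing import Optional

from pydantic import BaseModel, Field


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


joke = Joke(
    setup="Why don't skeletons fight each other?",
    punchline="They don't have the guts.",
    rating=7,
)
print(f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}")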
115 changes: 115 additions & 0 deletions pkg-py/tests/playwright/chat/langchain/tool_calling/app.py
@@ -0,0 +1,115 @@
import os
from datetime import datetime

from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from shiny.express import ui

_ = load_dotenv()


@tool
def get_current_time() -> str:
    """Get the current time in HH:MM:SS format."""
    return datetime.now().strftime("%H:%M:%S")


@tool
def get_current_date() -> str:
    """Get the current date in YYYY-MM-DD format."""
    return datetime.now().strftime("%Y-%m-%d")


@tool
def get_current_weather(city: str) -> str:
    """Get the current weather for a given city."""
    return f"The current weather in {city} is sunny with a temperature of 25°C."


@tool
def calculator(expression: str) -> str:
    """Evaluate mathematical expressions"""
    return str(eval(expression))


tools = [get_current_time, get_current_date, calculator, get_current_weather]

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant"),
        MessagesPlaceholder("chat_history", optional=True),
        ("human", "{input}"),
        MessagesPlaceholder("agent_scratchpad"),
    ]
)

llm = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4.1-nano-2025-04-14",
)

agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)

store = {}


def get_session_history(session_id: str):
    """
    Retrieves the chat history for a given session ID.
    If no history exists, a new one is created.
    """
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


agent_with_chat_history = RunnableWithMessageHistory(
    agent_executor,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

ui.page_opts(
    title="Shiny Chat with LangChain Agent",
    fillable=True,
    fillable_mobile=True,
)

chat = ui.Chat(
    id="chat",
)
chat.ui(
    messages=[
        {
            "content": "Hello! I'm a chatbot with tools. I can get the time, date, weather, or do calculations. I'll also remember our conversation. How can I help?",
            "role": "assistant",
        }
    ],
)


@chat.on_user_submit
async def handle_user_input(user_input: str):
    """
    Handles user input by streaming the agent's response.
    """
    config = {"configurable": {"session_id": "shiny_session_tools_1"}}

    async def stream_response():
        async for event in agent_with_chat_history.astream_events(
            {"input": user_input}, config=config, version="v1"
        ):
            kind = event["event"]
            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    yield content

    await chat.append_message_stream(stream_response())
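
One note on the calculator tool: it calls eval() on model-provided input, which is the simplest thing that works for a test fixture but executes arbitrary Python. If that ever becomes a concern, a restricted arithmetic evaluator is one possible swap. The sketch below is not part of the PR; the names safe_calculator and _eval_node are ours, and it only accepts numeric literals and basic operators.

# Sketch only: an eval()-free calculator tool limited to + - * / ** and unary minus.
import ast
import operator

from langchain_core.tools import tool

_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
}


def _eval_node(node):
    # Numbers are allowed as-is.
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    # Binary operations on allowed operators, evaluated recursively.
    if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_eval_node(node.left), _eval_node(node.right))
    # Unary minus.
    if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_eval_node(node.operand))
    raise ValueError("unsupported expression")


@tool
def safe_calculator(expression: str) -> str:
    """Evaluate a basic arithmetic expression (numbers and + - * / ** only)."""
    return str(_eval_node(ast.parse(expression, mode="eval").body))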
57 changes: 57 additions & 0 deletions pkg-py/tests/playwright/chat/llama-index/basic/app.py
@@ -0,0 +1,57 @@
from dotenv import load_dotenv
from llama_index.core.agent.workflow import AgentStream, FunctionAgent
from llama_index.core.workflow import Context
from llama_index.llms.openai import OpenAI
from shiny.express import ui

_ = load_dotenv()

llm = OpenAI(
    model="gpt-4.1-nano-2025-04-14",
)

ui.page_opts(
    title="Shiny Chat with LlamaIndex",
    fillable=True,
    fillable_mobile=True,
)

agent = FunctionAgent(
    tools=[],
    llm=llm,
    system_prompt="You are a pirate with a colorful personality.",
)

ctx = Context(agent)

chat = ui.Chat(
    id="chat",
)
chat.ui(
    messages=[
        {
            "role": "assistant",
            "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! Ask me anything, matey!",
        },
    ],
)


async def stream_response_from_agent(user_message: str, context: Context):
    handler = agent.run(user_msg=user_message, ctx=context)

    async for event in handler.stream_events():
        if isinstance(event, AgentStream):
            if event.delta:
                yield event.delta

    await handler


@chat.on_user_submit
async def handle_user_input(user_input: str):
    async def stream_generator():
        async for chunk in stream_response_from_agent(user_input, ctx):
            yield chunk

    await chat.append_message_stream(stream_generator())
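
Because a single Context(agent) is created at module load and reused on every agent.run() call, this example keeps the conversation across turns. Below is a minimal sketch, not part of the PR, of the same agent/context pair driven outside Shiny (for example from a terminal); it uses only the llama_index APIs the app already imports and assumes OPENAI_API_KEY is set in the environment.

# Sketch only: the second question can refer back to the first because both
# agent.run() calls share the same Context.
import asyncio

from llama_index.core.agent.workflow import AgentStream, FunctionAgent
from llama_index.core.workflow import Context
from llama_index.llms.openai import OpenAI


async def main():
    agent = FunctionAgent(
        tools=[],
        llm=OpenAI(model="gpt-4.1-nano-2025-04-14"),
        system_prompt="You are a pirate with a colorful personality.",
    )
    ctx = Context(agent)  # one Context carries state for the whole conversation
    for question in ["What's your name?", "What did I just ask you?"]:
        handler = agent.run(user_msg=question, ctx=ctx)
        async for event in handler.stream_events():
            if isinstance(event, AgentStream) and event.delta:
                print(event.delta, end="", flush=True)
        await handler
        print()


asyncio.run(main())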