diff --git a/pkg-py/tests/playwright/chat/langchain/basic/app.py b/pkg-py/tests/playwright/chat/langchain/basic/app.py
new file mode 100644
index 0000000..3bb49f1
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/langchain/basic/app.py
@@ -0,0 +1,75 @@
+import os
+
+from dotenv import load_dotenv
+from langchain_core.chat_history import InMemoryChatMessageHistory
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_openai import ChatOpenAI
+from shiny.express import ui
+
+_ = load_dotenv()
+
+model = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+ model="gpt-4.1-nano-2025-04-14",
+)
+
+prompt = ChatPromptTemplate.from_messages(
+ [
+ (
+ "system",
+ "You are a helpful assistant. You answer in a friendly and concise manner.",
+ ),
+ MessagesPlaceholder(variable_name="history"),
+ ("human", "{input}"),
+ ]
+)
+
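+# In-memory store mapping session IDs to chat histories; not persisted across restarts.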
+store = {}
+
+
+def get_session_history(session_id: str):
+ if session_id not in store:
+ store[session_id] = InMemoryChatMessageHistory()
+ return store[session_id]
+
+
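+# RunnableWithMessageHistory injects past turns into the "history" placeholder
+# and records each new exchange via get_session_history.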
+chain_with_history = RunnableWithMessageHistory(
+ prompt | model,
+ get_session_history,
+ input_messages_key="input",
+ history_messages_key="history",
+)
+
+ui.page_opts(
+ title="Shiny Chat with LangChain History",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "content": "Hello! I'm a chatbot that can remember our conversation. How can I help you today?",
+ "role": "assistant",
+ }
+ ],
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ config = {"configurable": {"session_id": "shiny_session_1"}}
+ response_stream = chain_with_history.astream(
+ {"input": user_input},
+ config=config,
+ )
+
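+    # Each streamed chunk is an AIMessageChunk; yield only its text content.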
+ async def stream_wrapper():
+ async for chunk in response_stream:
+ yield chunk.content
+
+ await chat.append_message_stream(stream_wrapper())
diff --git a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py
new file mode 100644
index 0000000..d7d1137
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py
@@ -0,0 +1,58 @@
+import os
+from typing import Optional
+
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from pydantic import BaseModel, Field
+from shiny.express import ui
+
+_ = load_dotenv()
+
+
+class Joke(BaseModel):
+ """Joke to tell user."""
+
+ setup: str = Field(description="The setup of the joke")
+ punchline: str = Field(description="The punchline to the joke")
+    rating: Optional[int] = Field(
+        default=None, description="How funny the joke is, from 1 to 10"
+    )
+
+
+_ = Joke.model_rebuild()
+
+chat_client = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+ model="gpt-4o",
+)
+
+ui.page_opts(
+ title="Hello LangChain Chat Model using structured output",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "role": "assistant",
+ "content": """
+Hello! I'm your joke assistant. I can tell you jokes and rate them.
+Here are some examples of what you can ask me:
+- Tell me a joke about pirates.
+- Can you tell me a funny joke about skeletons?
+- Give me a joke about cats.
+ """,
+ }
+ ],
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
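+    # with_structured_output() binds the Joke schema, so the reply parses into a Joke instance.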
+    joke = await chat_client.with_structured_output(Joke).ainvoke(user_input)
+ joke_text = f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}"
+ await chat.append_message(joke_text)
diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py
new file mode 100644
index 0000000..b7fa2cd
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py
@@ -0,0 +1,115 @@
+import os
+from datetime import datetime
+
+from dotenv import load_dotenv
+from langchain.agents import AgentExecutor, create_openai_tools_agent
+from langchain_core.chat_history import InMemoryChatMessageHistory
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_core.tools import tool
+from langchain_openai import ChatOpenAI
+from shiny.express import ui
+
+_ = load_dotenv()
+
+
+@tool
+def get_current_time() -> str:
+ """Get the current time in HH:MM:SS format."""
+ return datetime.now().strftime("%H:%M:%S")
+
+
+@tool
+def get_current_date() -> str:
+ """Get the current date in YYYY-MM-DD format."""
+ return datetime.now().strftime("%Y-%m-%d")
+
+
+@tool
+def get_current_weather(city: str) -> str:
+ """Get the current weather for a given city."""
+ return f"The current weather in {city} is sunny with a temperature of 25°C."
+
+
+@tool
+def calculator(expression: str) -> str:
+    """Evaluate a mathematical expression."""
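+    # NOTE: eval() on raw user input is only tolerable in a sandboxed test app
+    # like this one; never evaluate untrusted input this way in production.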
+ return str(eval(expression))
+
+
+tools = [get_current_time, get_current_date, calculator, get_current_weather]
+
+prompt = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are a helpful assistant"),
+ MessagesPlaceholder("chat_history", optional=True),
+ ("human", "{input}"),
+ MessagesPlaceholder("agent_scratchpad"),
+ ]
+)
+
+llm = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+ model="gpt-4.1-nano-2025-04-14",
+)
+
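+# Wire the model, tools, and prompt into an OpenAI-tools agent; AgentExecutor
+# runs the tool-calling loop until the model produces a final answer.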
+agent = create_openai_tools_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+store = {}
+
+
+def get_session_history(session_id: str):
+ """
+ Retrieves the chat history for a given session ID.
+ If no history exists, a new one is created.
+ """
+ if session_id not in store:
+ store[session_id] = InMemoryChatMessageHistory()
+ return store[session_id]
+
+
+agent_with_chat_history = RunnableWithMessageHistory(
+ agent_executor,
+ get_session_history,
+ input_messages_key="input",
+ history_messages_key="chat_history",
+)
+
+ui.page_opts(
+ title="Shiny Chat with LangChain Agent",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "content": "Hello! I'm a chatbot with tools. I can get the time, date, weather, or do calculations. I'll also remember our conversation. How can I help?",
+ "role": "assistant",
+ }
+ ],
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ """
+ Handles user input by streaming the agent's response.
+ """
+ config = {"configurable": {"session_id": "shiny_session_tools_1"}}
+
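+    # astream_events emits many event types; forward only the chat model's token chunks.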
+ async def stream_response():
+ async for event in agent_with_chat_history.astream_events(
+ {"input": user_input}, config=config, version="v1"
+ ):
+ kind = event["event"]
+ if kind == "on_chat_model_stream":
+ content = event["data"]["chunk"].content
+ if content:
+ yield content
+
+ await chat.append_message_stream(stream_response())
diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py
new file mode 100644
index 0000000..873f2f4
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py
@@ -0,0 +1,57 @@
+from dotenv import load_dotenv
+from llama_index.core.agent.workflow import AgentStream, FunctionAgent
+from llama_index.core.workflow import Context
+from llama_index.llms.openai import OpenAI
+from shiny.express import ui
+
+_ = load_dotenv()
+
+llm = OpenAI(
+ model="gpt-4.1-nano-2025-04-14",
+)
+
+ui.page_opts(
+ title="Shiny Chat with LlamaIndex",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+agent = FunctionAgent(
+ tools=[],
+ llm=llm,
+ system_prompt="You are a pirate with a colorful personality.",
+)
+
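+# A single shared Context preserves the agent's conversation state across turns.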
+ctx = Context(agent)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "role": "assistant",
+ "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! Ask me anything, matey!",
+ },
+ ],
+)
+
+
+async def stream_response_from_agent(user_message: str, context: Context):
+ handler = agent.run(user_msg=user_message, ctx=context)
+
+ async for event in handler.stream_events():
+ if isinstance(event, AgentStream):
+ if event.delta:
+ yield event.delta
+
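+    # Ensure the agent run has fully completed (and errors surface) before closing.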
+ await handler
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ async def stream_generator():
+ async for chunk in stream_response_from_agent(user_input, ctx):
+ yield chunk
+
+ await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py
new file mode 100644
index 0000000..3798ca7
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py
@@ -0,0 +1,99 @@
+from chatlas import ChatOpenAI
+from dotenv import load_dotenv
+from llama_index.core import StorageContext, load_index_from_storage
+from shiny.express import ui
+
+_ = load_dotenv()
+
+
+# Load the knowledge store (index) from disk
+try:
+ storage_context = StorageContext.from_defaults(persist_dir="./storage")
+ index = load_index_from_storage(storage_context)
+ print("LlamaIndex loaded successfully from ./storage")
+except Exception as e:
+ print(f"Error loading LlamaIndex: {e}")
+ print(
+ "Please ensure you have run the index creation script first if this is your initial run."
+ )
+ from llama_index.core import Document, VectorStoreIndex
+
+ print("Creating a dummy index for demonstration purposes...")
+ bookstore_documents = [
+ "Our shipping policy states that standard shipping takes 3-5 business days. Express shipping takes 1-2 business days. Free shipping is offered on all orders over $50.",
+ "Returns are accepted within 30 days of purchase, provided the book is in its original condition. To initiate a return, please visit our 'Returns' page on the website and fill out the form.",
+ "The 'BookWorm Rewards' program offers members 10% off all purchases and early access to sales. You earn 1 point for every $1 spent.",
+ "We accept Visa, Mastercard, American Express, and PayPal.",
+ "Currently, we do not offer international shipping outside of the United States and Canada.",
+ "The book 'The Midnight Library' by Matt Haig is a New York Times bestseller. It explores themes of regret and parallel lives.",
+ "Orders placed before 2 PM EST are processed on the same day.",
+ ]
+ documents = [Document(text=d) for d in bookstore_documents]
+ index = VectorStoreIndex.from_documents(documents)
+ index.storage_context.persist(persist_dir="./storage")
+ print("Dummy index created and saved.")
+
+
+def retrieve_trusted_content(query: str, top_k: int = 3):
+ """
+ Retrieve relevant content from the bookstore's knowledge base.
+ This acts as the "lookup" for our customer service assistant.
+
+ Parameters
+ ----------
+ query
+ The customer's question used to semantically search the knowledge store.
+ top_k
+ The number of most relevant policy/book excerpts to retrieve.
+ """
+ retriever = index.as_retriever(similarity_top_k=top_k)
+ nodes = retriever.retrieve(query)
+ # Format the retrieved content clearly so Chatlas can use it as "trusted" information
+    return [node.text for node in nodes]
+
+
+chat_client = ChatOpenAI(
+ system_prompt=(
+ "You are 'BookWorm Haven's Customer Service Assistant'. "
+ "Your primary goal is to help customers with their queries about shipping, returns, "
+ "payment methods, and book information based *only* on the provided trusted content. "
+ "If you cannot answer the question using the trusted content, politely state that "
+ "you don't have that information and suggest they visit the 'Help' section of the website."
+ ),
+ model="gpt-4o-mini",
+)
+
+# This is where Chatlas learns to "look up" information when needed.
+chat_client.register_tool(retrieve_trusted_content)
+
+
+ui.page_opts(
+ title="BookWorm Haven Customer Service",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+    id="chat",
+)
+chat.ui(
+    messages=[
+        """
+Hello! I am BookWorm Haven's Customer Service Assistant.
+
+Here are some examples of what you can ask me:
+
+- How long does standard shipping take?
+- What is your return policy?
+- Can you tell me about 'The Midnight Library'?
+"""
+    ],
+)
+
+
+# Generate a response when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
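+    # chatlas runs the tool-calling loop itself, invoking retrieve_trusted_content as needed.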
+ response = await chat_client.stream_async(user_input)
+ await chat.append_message_stream(response)
diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py
new file mode 100644
index 0000000..a70e0da
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py
@@ -0,0 +1,86 @@
+from typing import List, Optional
+
+from dotenv import load_dotenv
+from llama_index.core.agent.workflow import AgentStream, FunctionAgent
+from llama_index.core.workflow import Context
+from llama_index.llms.openai import OpenAI
+from pydantic import BaseModel, Field
+from shiny.express import ui
+
+_ = load_dotenv()
+
+
+class AnalysisResponse(BaseModel):
+ """A structured analysis response for complex queries."""
+
+ summary: str = Field(description="Executive summary of the analysis")
+ detailed_analysis: str = Field(description="Detailed analysis content")
+    methodology: Optional[str] = Field(
+        default=None, description="Methodology used for analysis"
+    )
+    conclusions: List[str] = Field(description="Key conclusions drawn")
+    recommendations: Optional[List[str]] = Field(
+        default=None, description="Actionable recommendations"
+    )
+
+
+_ = AnalysisResponse.model_rebuild()
+
+llm = OpenAI(model="gpt-4.1-nano-2025-04-14")
+
+ui.page_opts(
+ title="Analysis Assistant",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+agent = FunctionAgent(
+ tools=[],
+ llm=llm,
+    system_prompt=(
+        "You are an analytical assistant that provides thorough analysis. "
+        "Be clear, concise, and analytical in your responses."
+    ),
+)
+
+ctx = Context(agent)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "role": "assistant",
+ "content": """
+Hello! I'm your analysis assistant. I can help you analyze topics, data, and situations.
+Here are some examples of what you can ask me:
+
+- Analyze the impact of remote work on productivity.
+- Provide a detailed analysis of electric vehicle adoption.
+- What are the key conclusions from recent climate change studies?
+ """,
+ },
+ ],
+)
+
+
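+# Stream incremental text deltas from the agent's workflow events.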
+async def stream_response_from_agent(user_message: str, context: Context):
+ handler = agent.run(user_msg=user_message, ctx=context)
+
+ async for event in handler.stream_events():
+ if isinstance(event, AgentStream):
+ if event.delta:
+ yield event.delta
+
+ await handler
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ async def stream_generator():
+ async for chunk in stream_response_from_agent(user_input, ctx):
+ yield chunk
+
+ await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py
new file mode 100644
index 0000000..f127432
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py
@@ -0,0 +1,96 @@
+from datetime import datetime
+
+import pytz
+from dotenv import load_dotenv
+from llama_index.core.agent.workflow import AgentStream, FunctionAgent
+from llama_index.core.tools import FunctionTool
+from llama_index.core.workflow import Context
+from llama_index.llms.openai import OpenAI
+from shiny.express import ui
+
+_ = load_dotenv()
+
+
+def get_current_time(timezone: str = "UTC") -> str:
+ """Get the current time in the specified timezone.
+
+ Args:
+ timezone: The timezone to get the time for (e.g., 'UTC', 'US/Eastern', 'US/Pacific')
+
+ Returns:
+ Current time as a formatted string
+ """
+ tz = pytz.timezone(timezone)
+ current_time = datetime.now(tz)
+ return current_time.strftime("%I:%M:%S %p %Z")
+
+
+def get_current_date(timezone: str = "UTC") -> str:
+ """Get the current date in the specified timezone.
+
+ Args:
+ timezone: The timezone to get the date for (e.g., 'UTC', 'US/Eastern', 'US/Pacific')
+
+ Returns:
+ Current date as a formatted string
+ """
+ tz = pytz.timezone(timezone)
+ current_date = datetime.now(tz)
+ return current_date.strftime("%A, %B %d, %Y")
+
+
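+# Wrap the plain functions as LlamaIndex tools; the name, description, and
+# schema are inferred from each function's signature and docstring.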
+time_tool = FunctionTool.from_defaults(fn=get_current_time)
+date_tool = FunctionTool.from_defaults(fn=get_current_date)
+
+llm = OpenAI(
+ model="gpt-4.1-nano-2025-04-14",
+)
+
+ui.page_opts(
+ title="Shiny Chat with LlamaIndex Tool Calling",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+agent = FunctionAgent(
+ tools=[time_tool, date_tool],
+ llm=llm,
+ system_prompt="You are a pirate with a colorful personality. You can tell people the time and date when they need to know when to set sail!",
+)
+
+ctx = Context(agent)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "role": "assistant",
+ "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! I can also tell ye the time and date if ye need to know when to set sail!",
+ },
+ ],
+)
+
+
+async def stream_response_from_agent(user_message: str, context: Context):
+ """Stream response from agent using the context-based approach."""
+ handler = agent.run(user_msg=user_message, ctx=context)
+
+ async for event in handler.stream_events():
+ if isinstance(event, AgentStream):
+ if event.delta:
+ yield event.delta
+
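+    # Ensure the agent run has fully completed before closing the stream.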
+ await handler
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ """Handle user input and stream response using context."""
+
+ async def stream_generator():
+ async for chunk in stream_response_from_agent(user_input, ctx):
+ yield chunk
+
+ await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llm_package/basic/app.py b/pkg-py/tests/playwright/chat/llm_package/basic/app.py
new file mode 100644
index 0000000..b0285d3
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llm_package/basic/app.py
@@ -0,0 +1,40 @@
+import llm
+from dotenv import load_dotenv
+from shiny.express import ui
+
+# Load environment variables from .env file
+_ = load_dotenv()
+
+model = llm.get_model("gpt-4o-mini")
+
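+# A conversation object keeps multi-turn context between prompts.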
+conversation = model.conversation()
+
+ui.page_opts(
+ title="Hello LLM Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+            "content": "Hello! I'm your AI assistant. Ask me anything: questions, explanations, summaries, or help with a task. How can I assist you today?",
+ "role": "assistant",
+ }
+ ]
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    response = conversation.prompt(
+        user_input, system="You are a helpful assistant.", stream=True
+    )
+
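+    # The llm package yields chunks synchronously; wrap them in an async
+    # generator for Shiny's message stream.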
+ async def stream_generator():
+ for chunk in response:
+ yield chunk
+
+ await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py
new file mode 100644
index 0000000..e7c4b44
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py
@@ -0,0 +1,54 @@
+import llm
+from dotenv import load_dotenv
+from pydantic import BaseModel
+from shiny.express import ui
+
+# Load environment variables from .env file
+_ = load_dotenv()
+
+
+class Dog(BaseModel):
+ name: str
+ age: int
+
+
+model = llm.get_model("gpt-4o-mini")
+
+ui.page_opts(
+ title="Hello LLM Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+            "content": """
+Hello! Tell me about a dog and I'll return its name and age as structured output.
+
+Here are some examples of what you can ask me:
+
+- Tell me about a dog named Bella who is 3 years old.
+- I have a dog named Rocky, age 7.
+- Give me info about a puppy called Luna, 1 year old.
+""",
+ "role": "assistant",
+ }
+ ]
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
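+    # schema=Dog asks the model for JSON matching the Dog model; the chunks
+    # stream back as raw JSON text.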
+ response = model.prompt(user_input, stream=True, schema=Dog)
+
+ async def stream_generator():
+ for chunk in response:
+ yield chunk
+
+ await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py b/pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py
new file mode 100644
index 0000000..c704254
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py
@@ -0,0 +1,44 @@
+import llm
+from dotenv import load_dotenv
+from shiny.express import ui
+
+# Load environment variables from .env file
+_ = load_dotenv()
+
+
+def get_current_time() -> str:
+ """Returns the current date and time as a string."""
+ from datetime import datetime
+
+ return str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+
+model = llm.get_model("gpt-4o-mini")
+
+
+ui.page_opts(
+ title="Hello LLM Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "content": "Hello! I'm a chatbot that can help you with the current time. How can I assist you today?",
+ "role": "assistant",
+ }
+ ],
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
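+    # chain() lets the model call get_current_time and then continue with the
+    # tool's result before producing its final answer.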
+    response = model.chain(
+        user_input,
+        system="You are a helpful assistant.",
+        tools=[get_current_time],
+    )
+
+ # Stream the response to the chat window
+ await chat.append_message_stream(response)
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
new file mode 100644
index 0000000..3eed3df
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
@@ -0,0 +1,52 @@
+from typing import List
+
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+from pydantic_ai.messages import ModelMessage
+from shiny import reactive
+from shiny.express import ui
+
+_ = load_dotenv()
+chat_client = Agent(
+ "openai:gpt-4.1-nano-2025-04-14",
+ system_prompt="You are a helpful assistant.",
+)
+
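+# Reactive value holding the accumulated pydantic-ai message history across turns.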
+conversation_history: reactive.Value[List[ModelMessage]] = reactive.value([])
+
+ui.page_opts(
+ title="Hello OpenAI Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+            "content": "Hello! I'm your AI assistant. You can ask me questions, get explanations, or get help with tasks like brainstorming, summarizing, or coding. How can I assist you today?",
+            "role": "assistant",
+ }
+ ]
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ current_history = conversation_history.get()
+ stream = pydantic_stream_generator(user_input, current_history)
+ await chat.append_message_stream(stream)
+
+
+async def pydantic_stream_generator(
+ user_input: str, current_history: List[ModelMessage]
+):
+ message_history = current_history if current_history else None
+ async with chat_client.run_stream(
+ user_input, message_history=message_history
+ ) as result:
+ async for chunk in result.stream_text(delta=True):
+ yield chunk
+ conversation_history.set(result.all_messages())
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
new file mode 100644
index 0000000..d9b6d96
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
@@ -0,0 +1,54 @@
+from dotenv import load_dotenv
+from pydantic import BaseModel
+from pydantic_ai import Agent
+from shiny.express import ui
+
+
+class CityLocation(BaseModel):
+ city: str
+ county: str
+ state: str
+
+
+_ = load_dotenv()
+chat_client = Agent(
+ "openai:o4-mini",
+ system_prompt="You are a helpful assistant.",
+ output_type=CityLocation,
+)
+
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello OpenAI Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+
+# Create and display a Shiny chat component
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "role": "assistant",
+ "content": """
+Hello! Ask me where the Super Bowl was held in any year and I can tell you the state, county, and city.
+For example, you can ask:
+
+- Where was the Super Bowl in 2020?
+- What city hosted the Super Bowl in 2015?
+- Where was the Super Bowl in 2018?
+""",
+ }
+ ],
+)
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
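+    # output_type constrains the agent, so result.output is a CityLocation instance.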
+ result = await chat_client.run(user_input)
+ city_info = result.output
+ message = f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
+ await chat.append_message(message)
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
new file mode 100644
index 0000000..efdb350
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
@@ -0,0 +1,67 @@
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+from shiny.express import ui
+
+_ = load_dotenv()
+chat_client = Agent(
+ "openai:o4-mini",
+ system_prompt="You are a helpful assistant.",
+)
+
+
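+# tool_plain registers a tool that needs no run context; the agent decides when to call it.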
+@chat_client.tool_plain
+def get_weather(location: str) -> str:
+ """Get the current weather for a given location."""
+ return f"The current weather in {location} is sunny with a temperature of 25°C."
+
+
+@chat_client.tool_plain
+def get_time() -> str:
+ """Get the current time."""
+ from datetime import datetime
+
+ return f"The current time is {datetime.now().strftime('%H:%M:%S')}."
+
+
+@chat_client.tool_plain
+def get_joke() -> str:
+ """Tell a joke."""
+ return "Why don't scientists trust atoms? Because they make up everything!"
+
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Use tool calling with Pydantic AI",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+
+# Create and display a Shiny chat component
+chat = ui.Chat(
+ id="chat",
+)
+chat.ui(
+ messages=[
+ {
+ "content": "Hello! I'm a chatbot that can help you with weather, time, and jokes. How can I assist you today?",
+ "role": "assistant",
+ }
+ ],
+)
+
+
+# Generate a response when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+ stream = pydantic_stream_generator(user_input)
+ await chat.append_message_stream(stream)
+
+
+# An async generator function to stream the response from the Pydantic AI agent
+async def pydantic_stream_generator(user_input: str):
+ async with chat_client.run_stream(user_input) as result:
+ async for chunk in result.stream_text(delta=True):
+ yield chunk
diff --git a/pyproject.toml b/pyproject.toml
index 16a60df..2bd76d8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,6 +45,12 @@ test = [
"shinylive",
"shinywidgets",
"tox-uv>=1",
+    "langchain",
+    "langchain-openai",
+    "llama-index",
+    "llm",
+    "pydantic-ai",
+    "python-dotenv",
+    "pytz",
]
[tool.uv]