From e785f63b60fa09c0585abca10258fdb44ec75330 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 3 Jun 2025 15:46:47 -0700 Subject: [PATCH 01/31] Add some examples leveraging pydantic-AI --- .../playwright/chat/pydantic-ai/basic/app.py | 43 +++++ .../pydantic-ai/data-sci-adventure/app.py | 153 ++++++++++++++++++ .../pydantic-ai/data-sci-adventure/prompt.md | 47 ++++++ .../chat/pydantic-ai/requirements.txt | 6 + .../chat/pydantic-ai/structured_output/app.py | 46 ++++++ .../chat/pydantic-ai/tool_calling/app.py | 63 ++++++++ .../chat/pydantic-ai/workout-plan/app.py | 101 ++++++++++++ 7 files changed, 459 insertions(+) create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py create mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py new file mode 100644 index 0000000..7615b2c --- /dev/null +++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py @@ -0,0 +1,43 @@ +import os + +from dotenv import load_dotenv +from pydantic_ai import Agent + +from shiny.express import ui + +_ = load_dotenv() +chat_client = Agent( + "openai:o4-mini", + api_key=os.environ.get("OPENAI_API_KEY"), + system_prompt="You are a helpful assistant.", +) + + +# Set some Shiny page options +ui.page_opts( + title="Hello OpenAI Chat", + fillable=True, + fillable_mobile=True, +) + + +# Create and display a Shiny chat component +chat = ui.Chat( + id="chat", + messages=["Hello! How can I help you today?"], +) +chat.ui() + + +# Generate a response when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + stream = pydantic_stream_generator(user_input) + await chat.append_message_stream(stream) + + +# An async generator function to stream the response from the Pydantic AI agent +async def pydantic_stream_generator(user_input: str): + async with chat_client.run_stream(user_input) as result: + async for chunk in result.stream_text(delta=True): + yield chunk diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py new file mode 100644 index 0000000..0311c49 --- /dev/null +++ b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py @@ -0,0 +1,153 @@ +import os +from pathlib import Path +from dotenv import load_dotenv +from pydantic_ai import Agent +from faicons import icon_svg +from shiny import App, Inputs, reactive, ui + +welcome = """ +Welcome to a choose-your-own learning adventure in data science! +Please pick your role, the size of the company you work for, and the industry you're in. +Then click the "Start adventure" button to begin. 
+""" + +app_ui = ui.page_sidebar( + ui.sidebar( + ui.input_selectize( + "company_role", + "You are a...", + choices=[ + "Machine Learning Engineer", + "Data Analyst", + "Research Scientist", + "MLOps Engineer", + "Data Science Generalist", + ], + selected="Data Analyst", + ), + ui.input_selectize( + "company_size", + "who works for...", + choices=[ + "yourself", + "a startup", + "a university", + "a small business", + "a medium-sized business", + "a large business", + "an enterprise corporation", + ], + selected="a medium-sized business", + ), + ui.input_selectize( + "company_industry", + "in the ... industry", + choices=[ + "Healthcare and Pharmaceuticals", + "Banking, Financial Services, and Insurance", + "Technology and Software", + "Retail and E-commerce", + "Media and Entertainment", + "Telecommunications", + "Automotive and Manufacturing", + "Energy and Oil & Gas", + "Agriculture and Food Production", + "Cybersecurity and Defense", + ], + selected="Healthcare and Pharmaceuticals", + ), + ui.input_action_button( + "go", + "Start adventure", + icon=icon_svg("play"), + class_="btn btn-primary", + ), + id="sidebar", + ), + ui.chat_ui("chat"), + title="Choose your own data science adventure", + fillable=True, + fillable_mobile=True, +) + + +def server(input: Inputs): + _ = load_dotenv() + + try: + app_dir = Path(__file__).parent + with open(app_dir / "prompt.md") as f: + system_prompt_content = f.read() + except FileNotFoundError: + print("Error: prompt.md not found. Please create it in the app directory.") + print("Using a default system prompt for now.") + system_prompt_content = ( + "You are a storyteller creating an interactive adventure." + ) + except Exception as e: + print(f"Error loading prompt.md: {e}") + system_prompt_content = ( + "You are a storyteller creating an interactive adventure." + ) + + chat_client = Agent( + "openai:o4-mini", + api_key=os.environ.get("OPENAI_API_KEY"), + system_prompt=system_prompt_content, + ) + + chat = ui.Chat(id="chat") + + @reactive.calc + def starting_prompt(): + return ( + f"I want a story that features a {input.company_role()} " + f"who works for {input.company_size()} in the {input.company_industry()} industry." + ) + + has_started: reactive.value[bool] = reactive.value(False) + + @reactive.effect + @reactive.event(input.go) + async def _(): + if has_started(): + await chat.clear_messages() + await chat.append_message(welcome) + chat.update_user_input(value=starting_prompt(), submit=True) + chat.update_user_input(value="", focus=True) + has_started.set(True) + + @reactive.effect + async def _(): + if has_started(): + ui.update_action_button( + "go", label="Restart adventure", icon=icon_svg("repeat") + ) + ui.update_sidebar("sidebar", show=False) + else: + if not chat.messages(): + await chat.append_message(welcome) + chat.update_user_input(value=starting_prompt()) + + async def pydantic_adventure_stream_generator(current_user_input: str): + async with chat_client.run_stream(current_user_input) as result: + async for chunk in result.stream_text(delta=True): + yield chunk + + @chat.on_user_submit + async def handle_user_submission(user_input: str): + n_msgs = len(chat.messages()) + current_input_for_llm = user_input + + if n_msgs == 1: + current_input_for_llm += " Please jump right into the story without any greetings or introductions." + elif n_msgs == 4: + current_input_for_llm += ". Time to nudge this story toward its conclusion. 
Give one more scenario (like creating a report, dashboard, or presentation) that will let me wrap this up successfully." + elif n_msgs == 5: + current_input_for_llm += ". Time to wrap this up. Conclude the story in the next step and offer to summarize the chat or create example scripts in R or Python. Consult your instructions for the correct format. If the user asks for code, remember that you'll need to create simulated data that matches the story." + + stream_generator = pydantic_adventure_stream_generator(current_input_for_llm) + await chat.append_message_stream(stream_generator) + + +app = App(app_ui, server) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md new file mode 100644 index 0000000..92f81cb --- /dev/null +++ b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md @@ -0,0 +1,47 @@ +You are an AI guide creating an interactive "choose-your-own adventure" experience for +data scientists. Your task is to present scenarios and choices that allow the user to +navigate through a series of real-world data science challenges and situations to +complete a successful data science project. + +Follow these steps to continue the adventure: + +1. Read the user's input carefully. + +2. Create 2-3 distinct choices for the user, considering: + + - Select appropriate emojis for each choice + - Determine which part of each choice should be marked as a clickable suggestion + +3. Present a short scenario (2-3 sentences) that builds upon the user's previous choice + or input. + +4. Offer 2-3 choices for the user to select from. Each choice should be formatted as + follows: + + - Begin with a single, relevant emoji + - Wrap the clickable part of the suggestion (the part that makes sense as a user response) in a span with class "suggestion" + - The entire choice, including the emoji and any additional context, should be on a single line + + Example format (do not use this content, create your own): + + * 📊 Analyze the data using regression analysis to identify trends + * 🧮 Apply clustering algorithms to segment the customer base + * 🔬 Conduct A/B testing on the new feature + +5. Ensure that your scenario and choices are creative, clear, and concise, while + remaining relevant to data science topics. + +6. Your goal is to guide the user to the end of a successful data science project that + is completed in 3-4 turns. + + Remember, the user will continue the adventure by selecting one of the choices you + present. Be prepared to build upon any of the options in future interactions. + +When the story reaches its conclusion, offer to summarize the conversation or to create +an example analysis using R or Python, but lean towards using Python. In Python, focus +on using polars and plotnine. In R, focus on tidyverse tools like dplyr and ggplot2. +Dashboards should be built with Shiny for Python or Shiny for R. Reports should be +written using Quarto. + +Remember to use the "suggestion submit" class for adventure steps and the "suggestion" +class for the summary choices. 
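+
+For example, a complete choice line might look like the following (illustrative
+content only; substitute your own scenario text):
+
+* 📊 <span class="suggestion">Analyze the data using regression analysis</span> to identify trends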
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt b/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt
new file mode 100644
index 0000000..f5835dc
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt
@@ -0,0 +1,6 @@
+chatlas
+faicons
+openai
+pydantic-ai
+python-dotenv
+shiny
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
new file mode 100644
index 0000000..24cc592
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
@@ -0,0 +1,46 @@
+import os
+
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+from pydantic import BaseModel
+
+from shiny.express import ui
+
+
+class CityLocation(BaseModel):
+    city: str
+    county: str
+    state: str
+
+
+_ = load_dotenv()
+chat_client = Agent(
+    "openai:o4-mini",
+    api_key=os.environ.get("OPENAI_API_KEY"),
+    system_prompt="You are a helpful assistant.",
+    output_type=CityLocation,
+)
+
+
+# Set some Shiny page options
+ui.page_opts(
+    title="Hello OpenAI Chat",
+    fillable=True,
+    fillable_mobile=True,
+)
+
+
+# Create and display a Shiny chat component
+chat = ui.Chat(
+    id="chat",
+    messages=["Hello! Ask me where the Super Bowl was held in any year."],
+)
+chat.ui()
+
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    result = await chat_client.run(user_input)
+    city_info = result.output
+    message = f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
+    await chat.append_message(message)
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
new file mode 100644
index 0000000..3cbd315
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
@@ -0,0 +1,63 @@
+import os
+
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+
+from shiny.express import ui
+
+_ = load_dotenv()
+chat_client = Agent(
+    "openai:o4-mini",
+    api_key=os.environ.get("OPENAI_API_KEY"),
+    system_prompt="You are a helpful assistant.",
+)
+
+
+@chat_client.tool_plain
+def get_weather(location: str) -> str:
+    """Get the current weather for a given location."""
+    return f"The current weather in {location} is sunny with a temperature of 25°C."
+
+
+@chat_client.tool_plain
+def get_time() -> str:
+    """Get the current time."""
+    from datetime import datetime
+
+    return f"The current time is {datetime.now().strftime('%H:%M:%S')}."
+
+
+@chat_client.tool_plain
+def get_joke() -> str:
+    """Tell a joke."""
+    return "Why don't scientists trust atoms? Because they make up everything!"
+
+
+# Set some Shiny page options
+ui.page_opts(
+    title="Use tool calling with Pydantic AI",
+    fillable=True,
+    fillable_mobile=True,
+)
+
+
+# Create and display a Shiny chat component
+chat = ui.Chat(
+    id="chat",
+    messages=["Hello! Ask me to tell a joke, get the weather, or tell the time."],
+)
+chat.ui()
+
+
+# Generate a response when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    stream = pydantic_stream_generator(user_input)
+    await chat.append_message_stream(stream)
+
+
+# An async generator function to stream the response from the Pydantic AI agent
+async def pydantic_stream_generator(user_input: str):
+    async with chat_client.run_stream(user_input) as result:
+        async for chunk in result.stream_text(delta=True):
+            yield chunk
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py
new file mode 100644
index 0000000..3ef5f36
--- /dev/null
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py
@@ -0,0 +1,101 @@
+import os
+from dotenv import load_dotenv
+from pydantic_ai import Agent
+from faicons import icon_svg
+from shiny import reactive
+from shiny.express import input, render, ui
+
+_ = load_dotenv()
+
+# Create a Pydantic AI agent
+chat_client = Agent(
+    "openai:o4-mini",
+    api_key=os.environ.get("OPENAI_API_KEY"),
+    system_prompt="""
+    You are a helpful AI fitness coach.
+    Give detailed workout plans to users based on their fitness goals and experience level.
+    Before getting into details, give a brief introduction to the workout plan.
+    Keep the overall tone encouraging and professional yet friendly.
+    Generate the response in Markdown format and avoid using h1, h2, or h3.
+    """,
+)
+
+# Shiny page options 
+ui.page_opts(title="Personalized Workout Plan Generator")
+
+# Sidebar UI for user inputs
+with ui.sidebar(open={"mobile": "always-above"}):
+    ui.input_select(
+        "goal",
+        "Fitness Goal",
+        ["Strength", "Cardio", "Flexibility", "General Fitness"],
+    )
+    ui.input_selectize(
+        "equipment",
+        "Available Equipment",
+        ["Dumbbells", "Barbell", "Resistance Bands", "Bodyweight"],
+        multiple=True,
+        selected=["Bodyweight", "Barbell"],
+    )
+    ui.input_slider("experience", "Experience Level", min=1, max=10, value=5)
+    ui.input_slider("duration", "Duration (mins)", min=15, max=90, step=5, value=45)
+    ui.input_select(
+        "daysPerWeek",
+        "Days per Week",
+        [str(i) for i in range(1, 8)],
+        selected="3",
+    )
+    ui.input_task_button("generate", "Get Workout", icon=icon_svg("person-running"))
+
+    # Download button UI
+    @render.express
+    def download_ui():
+        plan = workout_stream.latest_stream.result()
+
+        @render.download(filename="workout_plan.md", label="Download Workout")
+        def download():
+            yield plan
+
+
+# Create a Markdown stream
+workout_stream = ui.MarkdownStream("workout_stream")
+
+# Display the Markdown stream in the main content area
+workout_stream.ui(
+    content=(
+        "Hi there! 👋 I'm your AI fitness coach. 💪"
+        "\n\n"
+        "Fill out the form in the sidebar to get started. 📝 🏋️‍♂️"
+    )
+)
+
+
+# Async generator function to stream the response from the Pydantic AI agent
+async def pydantic_workout_stream_generator(user_prompt: str):
+    """
+    An async generator that streams text chunks from the Pydantic AI agent.
+    """
+    # chat_client is defined globally
+    async with chat_client.run_stream(user_prompt) as result:
+        async for chunk in result.stream_text(delta=True):
+            yield chunk
+
+
+# Reactive effect to generate workout plan when the button is clicked
+@reactive.effect
+@reactive.event(input.generate)
+async def _():
+    # Construct the prompt based on user inputs
+    prompt = f"""
+    Generate a brief {input.duration()}-minute workout plan for a {input.goal()} fitness goal.
+    On a scale of 1-10, I have level {input.experience()} experience,
+    work out {input.daysPerWeek()} days per week, and have access to:
+    {", ".join(input.equipment()) if input.equipment() else "no equipment"}.
+    Format the response in Markdown.
+    """
+
+    # Generate the stream using the pydantic_workout_stream_generator
+    stream_generator = pydantic_workout_stream_generator(prompt)
+    
+    # Pass the async generator to the MarkdownStream
+    await workout_stream.stream(stream_generator)
From 2f19d5a079deed2a6742da3fbe807bed535dd18d Mon Sep 17 00:00:00 2001
From: Karan Gathani
Date: Tue, 3 Jun 2025 15:53:28 -0700
Subject: [PATCH 02/31] linting files

---
 pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py     | 1 -
 .../playwright/chat/pydantic-ai/data-sci-adventure/app.py | 3 ++-
 .../playwright/chat/pydantic-ai/structured_output/app.py  | 7 ++++---
 .../tests/playwright/chat/pydantic-ai/tool_calling/app.py | 1 -
 .../tests/playwright/chat/pydantic-ai/workout-plan/app.py | 7 ++++---
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
index 7615b2c..f243970 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
@@ -2,7 +2,6 @@
 
 from dotenv import load_dotenv
 from pydantic_ai import Agent
-
 from shiny.express import ui
 
 _ = load_dotenv()
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py
index 0311c49..124e4d9 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py
@@ -1,8 +1,9 @@
 import os
 from pathlib import Path
+
 from dotenv import load_dotenv
-from pydantic_ai import Agent
 from faicons import icon_svg
+from pydantic_ai import Agent
 from shiny import App, Inputs, reactive, ui
 
 welcome = """
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
index 24cc592..32e5cb1 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
@@ -1,9 +1,8 @@
 import os
 
 from dotenv import load_dotenv
-from pydantic_ai import Agent
 from pydantic import BaseModel
-
+from pydantic_ai import Agent
 from shiny.express import ui
 
 
@@ -42,5 +41,7 @@ class CityLocation(BaseModel):
 async def handle_user_input(user_input: str):
     result = await chat_client.run(user_input)
     city_info = result.output
-    message = f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
+    message = (
+        f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
+    )
     await chat.append_message(message)
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
index 3cbd315..b72f254 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
@@ -2,7 +2,6 @@
 
 from dotenv import load_dotenv
 from pydantic_ai import Agent
-
 from shiny.express import ui
 
 _ = load_dotenv()
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py
index 3ef5f36..52d6fa0 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py
+++
b/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py @@ -1,7 +1,8 @@ import os + from dotenv import load_dotenv -from pydantic_ai import Agent from faicons import icon_svg +from pydantic_ai import Agent from shiny import reactive from shiny.express import input, render, ui @@ -20,7 +21,7 @@ """, ) -# Shiny page options +# Shiny page options ui.page_opts(title="Personalized Workout Plan Generator") # Sidebar UI for user inputs @@ -96,6 +97,6 @@ async def _(): # Generate the stream using the pydantic_workout_stream_generator stream_generator = pydantic_workout_stream_generator(prompt) - + # Pass the async generator to the MarkdownStream await workout_stream.stream(stream_generator) From 02df719d1ba26fea137cf04c43115d20c7b30b2b Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 3 Jun 2025 15:56:06 -0700 Subject: [PATCH 03/31] remove requirements.txt --- pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt | 6 ------ pyproject.toml | 2 ++ 2 files changed, 2 insertions(+), 6 deletions(-) delete mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt b/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt deleted file mode 100644 index f5835dc..0000000 --- a/pkg-py/tests/playwright/chat/pydantic-ai/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -chatlas -faicons -openai -pydantic-ai -python-dotenv -shiny diff --git a/pyproject.toml b/pyproject.toml index 16a60df..ffaa1da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,8 @@ test = [ "shinylive", "shinywidgets", "tox-uv>=1", + "pydantic-ai", + "python-dotenv" ] [tool.uv] From 6205148bc2516cadf28b84f2c6ea7b8f5545a7da Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Wed, 18 Jun 2025 13:04:40 +0530 Subject: [PATCH 04/31] add more alternatives to chatlas --- .../playwright/chat/langchain/basic/app.py | 34 +++++ .../chat/langchain/structured_output/app.py | 44 +++++++ .../chat/langchain/tool_calling/app.py | 91 ++++++++++++++ .../playwright/chat/llama-index/basic/app.py | 46 +++++++ .../rag_with_chatlas/rag_example.py | 109 ++++++++++++++++ .../chat/llama-index/structured_output/app.py | 117 ++++++++++++++++++ .../chat/llama-index/tool_calling/app.py | 91 ++++++++++++++ .../playwright/chat/llm_package/basic/app.py | 36 ++++++ .../chat/llm_package/tool_calling_ex/app.py | 39 ++++++ 9 files changed, 607 insertions(+) create mode 100644 pkg-py/tests/playwright/chat/langchain/basic/app.py create mode 100644 pkg-py/tests/playwright/chat/langchain/structured_output/app.py create mode 100644 pkg-py/tests/playwright/chat/langchain/tool_calling/app.py create mode 100644 pkg-py/tests/playwright/chat/llama-index/basic/app.py create mode 100644 pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py create mode 100644 pkg-py/tests/playwright/chat/llama-index/structured_output/app.py create mode 100644 pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py create mode 100644 pkg-py/tests/playwright/chat/llm_package/basic/app.py create mode 100644 pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py diff --git a/pkg-py/tests/playwright/chat/langchain/basic/app.py b/pkg-py/tests/playwright/chat/langchain/basic/app.py new file mode 100644 index 0000000..682ba86 --- /dev/null +++ b/pkg-py/tests/playwright/chat/langchain/basic/app.py @@ -0,0 +1,34 @@ +import os + +from dotenv import load_dotenv +from langchain_openai import ChatOpenAI +from shiny.express import ui + +_ = load_dotenv() +chat_client = ChatOpenAI( + 
api_key=os.environ.get("OPENAI_API_KEY"), + model="gpt-4o", +) + +ui.page_opts( + title="Hello LangChain Chat Models", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=["Hello! How can I help you today?"], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = chat_client.astream(user_input) + + async def stream_wrapper(): + async for item in response: + yield item.content + + await chat.append_message_stream(stream_wrapper()) diff --git a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py new file mode 100644 index 0000000..9d78110 --- /dev/null +++ b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py @@ -0,0 +1,44 @@ +import os +from typing import Optional + +from dotenv import load_dotenv +from langchain_openai import ChatOpenAI +from pydantic import BaseModel, Field +from shiny.express import ui + +_ = load_dotenv() + + +class Joke(BaseModel): + """Joke to tell user.""" + + setup: str = Field(description="The setup of the joke") + punchline: str = Field(description="The punchline to the joke") + rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10") + + +_ = Joke.model_rebuild() + +chat_client = ChatOpenAI( + api_key=os.environ.get("OPENAI_API_KEY"), + model="gpt-4o", +) + +ui.page_opts( + title="Hello LangChain Chat Model using structured output", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=["Hello! How can I help you today?"], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + joke = chat_client.with_structured_output(Joke).invoke(user_input) + joke_text = f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}" + await chat.append_message_stream(joke_text) diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py new file mode 100644 index 0000000..866fe85 --- /dev/null +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -0,0 +1,91 @@ +import os +from datetime import datetime + +from dotenv import load_dotenv +from langchain_core.messages import HumanMessage +from langchain_core.tools import tool +from langchain_openai import ChatOpenAI +from shiny.express import ui + +_ = load_dotenv() + + +@tool +def get_current_time() -> str: + """Get the current time in HH:MM:SS format.""" + return datetime.now().strftime("%H:%M:%S") + + +@tool +def get_current_date() -> str: + """Get the current date in YYYY-MM-DD format.""" + return datetime.now().strftime("%Y-%m-%d") + + +@tool +def get_current_datetime() -> str: + """Get the current date and time in a readable format.""" + return datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + +tools = [get_current_time, get_current_date, get_current_datetime] + +chat_client = ChatOpenAI( + api_key=os.environ.get("OPENAI_API_KEY"), + model="gpt-4o", +).bind_tools(tools) + +ui.page_opts( + title="Hello LangChain Chat Models with Tools", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=[ + "Hello! How can I help you today? I can tell you the current time, date, or both!" 
+ ], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + messages = [HumanMessage(content=user_input)] + + async def stream_wrapper(): + # while True: + response = await chat_client.astream(messages) + + # if response.tool_calls: + # if response.content: + # yield response.content + "\n\n" + + # for tool_call in response.tool_calls: + # tool_to_call = None + # for tool in tools: + # if tool.name == tool_call["name"]: + # tool_to_call = tool + # break + + # if tool_to_call: + # tool_result = tool_to_call.invoke(tool_call["args"]) + + # messages.append(response) + # messages.append( + # ToolMessage( + # content=str(tool_result), tool_call_id=tool_call["id"] + # ) + # ) + + # continue + # else: + # yield response.content + # break + + async def stream_wrapper(): + async for item in response: + yield item.content + + await chat.append_message_stream(stream_wrapper()) diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py new file mode 100644 index 0000000..a0f78d9 --- /dev/null +++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py @@ -0,0 +1,46 @@ +from dotenv import load_dotenv +from llama_index.core.llms import ChatMessage +from llama_index.llms.openai import OpenAI +from shiny.express import ui + +# Load environment variables from .env file +_ = load_dotenv() + +llm = OpenAI( + model="gpt-4o-mini", +) + +ui.page_opts( + title="Shiny Chat with LlamaIndex", + fillable=True, + fillable_mobile=True, +) + + +chat = ui.Chat( + id="chat", + messages=[ + {"role": "system", "content": "You are a pirate with a colorful personality."}, + {"role": "user", "content": "What is your name, pirate?"}, + { + "role": "assistant", + "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas!", + }, + ], +) +chat.ui() + + +async def get_response_tokens(conversation: list[ChatMessage]): + response_stream = await llm.astream_chat(conversation) + async for r in response_stream: + yield r.delta + + +@chat.on_user_submit +async def handle_user_input(): + conversation = [ + ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages() + ] + + await chat.append_message_stream(get_response_tokens(conversation)) diff --git a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py new file mode 100644 index 0000000..d7c7674 --- /dev/null +++ b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py @@ -0,0 +1,109 @@ +import os + +from chatlas import ChatOpenAI +from llama_index.core import StorageContext, load_index_from_storage + +_ = os.environ.get("OPENAI_API_KEY") + + +# Load the knowledge store (index) from disk +try: + storage_context = StorageContext.from_defaults(persist_dir="./storage") + index = load_index_from_storage(storage_context) + print("LlamaIndex loaded successfully from ./storage") +except Exception as e: + print(f"Error loading LlamaIndex: {e}") + print( + "Please ensure you have run the index creation script first if this is your initial run." + ) + from llama_index.core import Document, VectorStoreIndex + + print("Creating a dummy index for demonstration purposes...") + bookstore_documents = [ + "Our shipping policy states that standard shipping takes 3-5 business days. Express shipping takes 1-2 business days. 
Free shipping is offered on all orders over $50.", + "Returns are accepted within 30 days of purchase, provided the book is in its original condition. To initiate a return, please visit our 'Returns' page on the website and fill out the form.", + "The 'BookWorm Rewards' program offers members 10% off all purchases and early access to sales. You earn 1 point for every $1 spent.", + "We accept Visa, Mastercard, American Express, and PayPal.", + "Currently, we do not offer international shipping outside of the United States and Canada.", + "The book 'The Midnight Library' by Matt Haig is a New York Times bestseller. It explores themes of regret and parallel lives.", + "Orders placed before 2 PM EST are processed on the same day.", + ] + documents = [Document(text=d) for d in bookstore_documents] + index = VectorStoreIndex.from_documents(documents) + index.storage_context.persist(persist_dir="./storage") + print("Dummy index created and saved.") + + +def retrieve_trusted_content( + query: str, top_k: int = 3 +): + """ + Retrieve relevant content from the bookstore's knowledge base. + This acts as the "lookup" for our customer service assistant. + + Parameters + ---------- + query + The customer's question used to semantically search the knowledge store. + top_k + The number of most relevant policy/book excerpts to retrieve. + """ + retriever = index.as_retriever(similarity_top_k=top_k) + nodes = retriever.retrieve(query) + # Format the retrieved content clearly so Chatlas can use it as "trusted" information + return [f"{x.text}" for x in nodes] + + +chat = ChatOpenAI( + system_prompt=( + "You are 'BookWorm Haven's Customer Service Assistant'. " + "Your primary goal is to help customers with their queries about shipping, returns, " + "payment methods, and book information based *only* on the provided trusted content. " + "If you cannot answer the question using the trusted content, politely state that " + "you don't have that information and suggest they visit the 'Help' section of the website." + ), + model="gpt-4o-mini", +) + +# This is where Chatlas learns to "look up" information when needed. +chat.register_tool(retrieve_trusted_content) + + +# Example Customer Interactions with the Assistant +print("\n--- BookWorm Haven Customer Service ---\n") + +# Example 1: Question that can be answered from the knowledge base (shipping policy) +customer_query_1 = "How long does standard shipping take?" +print(f"Customer: {customer_query_1}") +response_1 = chat.chat(customer_query_1) +print(f"Assistant: {response_1}\n") + +# Example 2: Another question answerable from the knowledge base (return policy) +customer_query_2 = "What's your return policy?" +print(f"Customer: {customer_query_2}") +response_2 = chat.chat(customer_query_2) +print(f"Assistant: {response_2}\n") + +# Example 3: Question about a specific book (information available) +customer_query_3 = "Tell me about 'The Midnight Library'." +print(f"Customer: {customer_query_3}") +response_3 = chat.chat(customer_query_3) +print(f"Assistant: {response_3}\n") + +# Example 4: Question with information not in the knowledge base +customer_query_4 = "Do you have any books about quantum physics for beginners?" +print(f"Customer: {customer_query_4}") +response_4 = chat.chat(customer_query_4) +print(f"Assistant: {response_4}\n") + +# Example 5: Combining information +customer_query_5 = "Is free shipping available and what payment methods do you accept?" 
+print(f"Customer: {customer_query_5}") +response_5 = chat.chat(customer_query_5) +print(f"Assistant: {response_5}\n") + +# Example 6: More nuanced query requiring retrieval +customer_query_6 = "I want to return a book, what should I do?" +print(f"Customer: {customer_query_6}") +response_6 = chat.chat(customer_query_6) +print(f"Assistant: {response_6}\n") diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py new file mode 100644 index 0000000..7abed49 --- /dev/null +++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py @@ -0,0 +1,117 @@ +import json +from typing import List, Optional + +from dotenv import load_dotenv +from llama_index.core.llms import ChatMessage +from llama_index.core.program import LLMTextCompletionProgram +from llama_index.llms.openai import OpenAI +from pydantic import BaseModel, Field +from shiny.express import ui + +# Load environment variables from .env file +_ = load_dotenv() + + +# Define structured output models +class PirateResponse(BaseModel): + """A pirate's response with personality and structured data.""" + + message: str = Field(description="The pirate's main response message") + mood: str = Field( + description="The pirate's current mood (e.g., jolly, grumpy, excited)" + ) + treasure_count: Optional[int] = Field( + description="Number of treasures mentioned, if any" + ) + nautical_terms: List[str] = Field(description="List of nautical/pirate terms used") + + +class CrewMember(BaseModel): + """Information about a pirate crew member.""" + + name: str = Field(description="The crew member's name") + role: str = Field(description="Their role on the ship") + experience_years: int = Field(description="Years of sailing experience") + + +class PirateStory(BaseModel): + """A structured pirate story response.""" + + title: str = Field(description="Title of the pirate story") + story: str = Field(description="The main story content") + characters: List[CrewMember] = Field(description="Characters in the story") + moral: Optional[str] = Field(description="The moral of the story, if any") + + +_ = PirateResponse.model_rebuild() +_ = CrewMember.model_rebuild() +_ = PirateStory.model_rebuild() + + +llm = OpenAI( + model="gpt-4o-mini", +) + +ui.page_opts( + title="Shiny Chat with LlamaIndex Structured Output", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=[ + {"role": "system", "content": "You are a pirate with a colorful personality."}, + {"role": "user", "content": "What is your name, pirate?"}, + { + "role": "assistant", + "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! Ask me about a short story or a tale, and I'll spin you a yarn full of adventure and treasure!", + }, + ], +) +chat.ui() + + +async def get_response_tokens(conversation: list[ChatMessage]): + last_message = ( + conversation[-1].content.lower() + if conversation and conversation[-1].content + else "" + ) + + if "story" in last_message or "tale" in last_message: + program = LLMTextCompletionProgram.from_defaults( + output_cls=PirateStory, + llm=llm, + prompt_template_str=( + "You are a pirate storyteller. Based on this conversation: {conversation}\n" + "Create a pirate story with characters and a moral." 
+ ), + ) + response = await program.acall(conversation=str(conversation)) + else: + program = LLMTextCompletionProgram.from_defaults( + output_cls=PirateResponse, + llm=llm, + prompt_template_str=( + "You are a pirate with a colorful personality. " + "Based on this conversation: {conversation}\n" + "Respond in character and include relevant nautical terms." + ), + ) + response = await program.acall(conversation=str(conversation)) + + json_response = f"```json\n{json.dumps(response.dict(), indent=2)}\n```" + + # Yield the response in chunks + for char in json_response: + yield char + + +@chat.on_user_submit +async def handle_user_input(): + conversation = [ + ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages() + ] + + await chat.append_message_stream(get_response_tokens(conversation)) diff --git a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py new file mode 100644 index 0000000..63b862e --- /dev/null +++ b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py @@ -0,0 +1,91 @@ +from datetime import datetime + +import pytz +from dotenv import load_dotenv +from llama_index.core.agent import ReActAgent +from llama_index.core.llms import ChatMessage +from llama_index.core.tools import FunctionTool +from llama_index.llms.openai import OpenAI +from shiny.express import ui + +# Load environment variables from .env file +_ = load_dotenv() + + +def get_current_time(timezone: str = "UTC") -> str: + """Get the current time in the specified timezone. + + Args: + timezone: The timezone to get the time for (e.g., 'UTC', 'US/Eastern', 'US/Pacific') + + Returns: + Current time as a formatted string + """ + tz = pytz.timezone(timezone) + current_time = datetime.now(tz) + return current_time.strftime("%I:%M:%S %p %Z") + + +def get_current_date(timezone: str = "UTC") -> str: + """Get the current date in the specified timezone. + + Args: + timezone: The timezone to get the date for (e.g., 'UTC', 'US/Eastern', 'US/Pacific') + + Returns: + Current date as a formatted string + """ + tz = pytz.timezone(timezone) + current_date = datetime.now(tz) + return current_date.strftime("%A, %B %d, %Y") + + +time_tool = FunctionTool.from_defaults(fn=get_current_time) +date_tool = FunctionTool.from_defaults(fn=get_current_date) + +llm = OpenAI( + model="gpt-4o-mini", +) + +ui.page_opts( + title="Shiny Chat with LlamaIndex Tool Calling", + fillable=True, + fillable_mobile=True, +) + + +chat = ui.Chat( + id="chat", + messages=[ + { + "role": "system", + "content": "You are a pirate with a colorful personality. You can help users get the current time and date using your tools when they ask.", + }, + {"role": "user", "content": "What is your name, pirate?"}, + { + "role": "assistant", + "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! 
I can also tell ye the time and date if ye need to know when to set sail!", + }, + ], +) +chat.ui() + + +async def get_response_tokens(conversation: list[ChatMessage]): + + agent = ReActAgent.from_tools([time_tool, date_tool], llm=llm, verbose=True) + + last_message = conversation[-1].content if conversation else "" + + response_stream = await agent.astream_chat(last_message) + async for token in response_stream.async_response_gen(): + yield token + + +@chat.on_user_submit +async def handle_user_input(): + conversation = [ + ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages() + ] + + await chat.append_message_stream(get_response_tokens(conversation)) diff --git a/pkg-py/tests/playwright/chat/llm_package/basic/app.py b/pkg-py/tests/playwright/chat/llm_package/basic/app.py new file mode 100644 index 0000000..c7ae83d --- /dev/null +++ b/pkg-py/tests/playwright/chat/llm_package/basic/app.py @@ -0,0 +1,36 @@ +import llm +from dotenv import load_dotenv +from shiny.express import ui + +# Load environment variables from .env file +_ = load_dotenv() + + +model = llm.get_model("gpt-4o-mini") + + +model.system_prompt = "You are a helpful assistant." + +ui.page_opts( + title="Hello LLM Chat", + fillable=True, + fillable_mobile=True, +) + + +chat = ui.Chat( + id="chat", + messages=["Hello! I am a bot using `llm` package with OpenAI. How can I help?"], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = model.prompt(user_input, stream=True) + + async def stream_generator(): + for chunk in response: + yield chunk + + await chat.append_message_stream(stream_generator()) diff --git a/pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py b/pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py new file mode 100644 index 0000000..6608de8 --- /dev/null +++ b/pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py @@ -0,0 +1,39 @@ +import llm +from dotenv import load_dotenv +from shiny.express import ui + +# Load environment variables from .env file +_ = load_dotenv() + + +def get_current_time() -> str: + """Returns the current date and time as a string.""" + from datetime import datetime + + return str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + + +model = llm.get_model("gpt-4o-mini") +model.system_prompt = "You are a helpful assistant." + + +ui.page_opts( + title="Hello LLM Chat", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=["Hello! I am a bot using `llm` package with OpenAI that showcases tool calling. How can I help? 
Ask me for the current time or date."], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + + response = model.chain(user_input, tools=[get_current_time]) + + # Stream the response to the chat window + await chat.append_message_stream(response) From 027acf5f7fcd1565b268d95ca5ca1f08d7fc6492 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Wed, 18 Jun 2025 13:19:47 +0530 Subject: [PATCH 05/31] remove comments in tool calling example --- .../chat/langchain/tool_calling/app.py | 66 +++++++++---------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py index 866fe85..292f5d7 100644 --- a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -30,6 +30,8 @@ def get_current_datetime() -> str: tools = [get_current_time, get_current_date, get_current_datetime] +tool_registry = {tool.name: tool for tool in tools} + chat_client = ChatOpenAI( api_key=os.environ.get("OPENAI_API_KEY"), model="gpt-4o", @@ -54,38 +56,32 @@ def get_current_datetime() -> str: async def handle_user_input(user_input: str): messages = [HumanMessage(content=user_input)] - async def stream_wrapper(): - # while True: - response = await chat_client.astream(messages) - - # if response.tool_calls: - # if response.content: - # yield response.content + "\n\n" - - # for tool_call in response.tool_calls: - # tool_to_call = None - # for tool in tools: - # if tool.name == tool_call["name"]: - # tool_to_call = tool - # break - - # if tool_to_call: - # tool_result = tool_to_call.invoke(tool_call["args"]) - - # messages.append(response) - # messages.append( - # ToolMessage( - # content=str(tool_result), tool_call_id=tool_call["id"] - # ) - # ) - - # continue - # else: - # yield response.content - # break - - async def stream_wrapper(): - async for item in response: - yield item.content - - await chat.append_message_stream(stream_wrapper()) + async def stream_response(): + accumulated_tool_calls = [] + + async for chunk in chat_client.astream(messages): + tool_calls = getattr(chunk, "tool_calls", None) + if tool_calls: + accumulated_tool_calls.extend(tool_calls) + + if chunk.content: + content = chunk.content + if isinstance(content, str): + yield content + elif isinstance(content, list): + for part in content: + if isinstance(part, str): + yield part + + for tool_call in accumulated_tool_calls: + tool_name = tool_call.get("name", "") + if not tool_name: + continue + + if tool_name in tool_registry: + result = tool_registry[tool_name].invoke({}) + yield f"\n\n🔧 {tool_name}: {result}" + else: + yield f"\n\n❌ Unknown tool: {tool_name}" + + await chat.append_message_stream(stream_response()) From 63e93bdbe1b6fe94c0b5e577ca601700d01b4508 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Wed, 18 Jun 2025 13:22:02 +0530 Subject: [PATCH 06/31] add extra packages --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ffaa1da..cc2929d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,10 @@ test = [ "shinywidgets", "tox-uv>=1", "pydantic-ai", - "python-dotenv" + "python-dotenv", + "llm", + "llama-index", + "langchain" ] [tool.uv] From 657015b3a0e59fa12aa42a4058e2fc265013eff2 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Wed, 18 Jun 2025 13:34:13 +0530 Subject: [PATCH 07/31] add missing structured_output for llm --- 
.../chat/llm_package/structured_output/app.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 pkg-py/tests/playwright/chat/llm_package/structured_output/app.py diff --git a/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py new file mode 100644 index 0000000..6c20e16 --- /dev/null +++ b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py @@ -0,0 +1,42 @@ +import llm +from dotenv import load_dotenv +from shiny.express import ui +from pydantic import BaseModel + + +# Load environment variables from .env file +_ = load_dotenv() + + +class Dog(BaseModel): + name: str + age: int + +model = llm.get_model("gpt-4o-mini") + + +model.system_prompt = "You are a helpful assistant." + +ui.page_opts( + title="Hello LLM Chat", + fillable=True, + fillable_mobile=True, +) + + +chat = ui.Chat( + id="chat", + messages=["Hello! I am a bot using `llm` package with OpenAI. How can I help?"], +) +chat.ui() + + +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = model.prompt(user_input, stream=True, schema=Dog) + + async def stream_generator(): + for chunk in response: + yield chunk + + await chat.append_message_stream(stream_generator()) From 772763339a3630160db7b545566318cf950650ee Mon Sep 17 00:00:00 2001 From: Karan Date: Mon, 30 Jun 2025 21:28:29 +0530 Subject: [PATCH 08/31] Update pkg-py/tests/playwright/chat/langchain/structured_output/app.py Co-authored-by: Carson Sievert --- pkg-py/tests/playwright/chat/langchain/structured_output/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py index 9d78110..d3f59ca 100644 --- a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py @@ -41,4 +41,4 @@ class Joke(BaseModel): async def handle_user_input(user_input: str): joke = chat_client.with_structured_output(Joke).invoke(user_input) joke_text = f"{joke.setup}\n\n{joke.punchline}\n\nRating: {joke.rating if joke.rating is not None else 'N/A'}" - await chat.append_message_stream(joke_text) + await chat.append_message(joke_text) From c3d5f532e0b88fb1defc0671a1aabe027c0dfb89 Mon Sep 17 00:00:00 2001 From: Karan Date: Mon, 30 Jun 2025 21:29:07 +0530 Subject: [PATCH 09/31] Update pkg-py/tests/playwright/chat/llama-index/structured_output/app.py Co-authored-by: Carson Sievert --- .../playwright/chat/llama-index/structured_output/app.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py index 7abed49..131cd7a 100644 --- a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py @@ -101,11 +101,7 @@ async def get_response_tokens(conversation: list[ChatMessage]): ) response = await program.acall(conversation=str(conversation)) - json_response = f"```json\n{json.dumps(response.dict(), indent=2)}\n```" - - # Yield the response in chunks - for char in json_response: - yield char + yield f"```json\n{json.dumps(response.dict(), indent=2)}\n```" @chat.on_user_submit From d3bdfaa6a289b8cc54b028b595c0f8dc421dd280 Mon Sep 17 00:00:00 2001 From: Karan Date: Mon, 30 Jun 2025 21:29:51 +0530 Subject: [PATCH 10/31] Update 
pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py Co-authored-by: Carson Sievert --- pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py index f243970..7a58c95 100644 --- a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py +++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py @@ -7,7 +7,6 @@ _ = load_dotenv() chat_client = Agent( "openai:o4-mini", - api_key=os.environ.get("OPENAI_API_KEY"), system_prompt="You are a helpful assistant.", ) From cf765eda46f1bef7dc35e2fd153793354222ddd3 Mon Sep 17 00:00:00 2001 From: Karan Date: Mon, 30 Jun 2025 21:30:04 +0530 Subject: [PATCH 11/31] Update pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py Co-authored-by: Carson Sievert --- .../tests/playwright/chat/pydantic-ai/structured_output/app.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py index 32e5cb1..b99150c 100644 --- a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py @@ -15,7 +15,6 @@ class CityLocation(BaseModel): _ = load_dotenv() chat_client = Agent( "openai:o4-mini", - api_key=os.environ.get("OPENAI_API_KEY"), system_prompt="You are a helpful assistant.", output_type=CityLocation, ) From 25f82b5891e3f22bc4a7401f0ee5fb617433c456 Mon Sep 17 00:00:00 2001 From: Karan Date: Mon, 30 Jun 2025 21:30:11 +0530 Subject: [PATCH 12/31] Update pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py Co-authored-by: Carson Sievert --- pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py index b72f254..6808727 100644 --- a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py @@ -7,7 +7,6 @@ _ = load_dotenv() chat_client = Agent( "openai:o4-mini", - api_key=os.environ.get("OPENAI_API_KEY"), system_prompt="You are a helpful assistant.", ) From 7cad49a209649dd527376df34d3b3f61dbdd4b78 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 08:27:04 +0530 Subject: [PATCH 13/31] remove data sci adventure example for pydantic --- .../pydantic-ai/data-sci-adventure/app.py | 154 ------------------ .../pydantic-ai/data-sci-adventure/prompt.md | 47 ------ 2 files changed, 201 deletions(-) delete mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py delete mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py deleted file mode 100644 index 124e4d9..0000000 --- a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/app.py +++ /dev/null @@ -1,154 +0,0 @@ -import os -from pathlib import Path - -from dotenv import load_dotenv -from faicons import icon_svg -from pydantic_ai import Agent -from shiny import App, Inputs, reactive, ui - -welcome = """ -Welcome to a choose-your-own learning adventure in data science! -Please pick your role, the size of the company you work for, and the industry you're in. 
-Then click the "Start adventure" button to begin. -""" - -app_ui = ui.page_sidebar( - ui.sidebar( - ui.input_selectize( - "company_role", - "You are a...", - choices=[ - "Machine Learning Engineer", - "Data Analyst", - "Research Scientist", - "MLOps Engineer", - "Data Science Generalist", - ], - selected="Data Analyst", - ), - ui.input_selectize( - "company_size", - "who works for...", - choices=[ - "yourself", - "a startup", - "a university", - "a small business", - "a medium-sized business", - "a large business", - "an enterprise corporation", - ], - selected="a medium-sized business", - ), - ui.input_selectize( - "company_industry", - "in the ... industry", - choices=[ - "Healthcare and Pharmaceuticals", - "Banking, Financial Services, and Insurance", - "Technology and Software", - "Retail and E-commerce", - "Media and Entertainment", - "Telecommunications", - "Automotive and Manufacturing", - "Energy and Oil & Gas", - "Agriculture and Food Production", - "Cybersecurity and Defense", - ], - selected="Healthcare and Pharmaceuticals", - ), - ui.input_action_button( - "go", - "Start adventure", - icon=icon_svg("play"), - class_="btn btn-primary", - ), - id="sidebar", - ), - ui.chat_ui("chat"), - title="Choose your own data science adventure", - fillable=True, - fillable_mobile=True, -) - - -def server(input: Inputs): - _ = load_dotenv() - - try: - app_dir = Path(__file__).parent - with open(app_dir / "prompt.md") as f: - system_prompt_content = f.read() - except FileNotFoundError: - print("Error: prompt.md not found. Please create it in the app directory.") - print("Using a default system prompt for now.") - system_prompt_content = ( - "You are a storyteller creating an interactive adventure." - ) - except Exception as e: - print(f"Error loading prompt.md: {e}") - system_prompt_content = ( - "You are a storyteller creating an interactive adventure." - ) - - chat_client = Agent( - "openai:o4-mini", - api_key=os.environ.get("OPENAI_API_KEY"), - system_prompt=system_prompt_content, - ) - - chat = ui.Chat(id="chat") - - @reactive.calc - def starting_prompt(): - return ( - f"I want a story that features a {input.company_role()} " - f"who works for {input.company_size()} in the {input.company_industry()} industry." - ) - - has_started: reactive.value[bool] = reactive.value(False) - - @reactive.effect - @reactive.event(input.go) - async def _(): - if has_started(): - await chat.clear_messages() - await chat.append_message(welcome) - chat.update_user_input(value=starting_prompt(), submit=True) - chat.update_user_input(value="", focus=True) - has_started.set(True) - - @reactive.effect - async def _(): - if has_started(): - ui.update_action_button( - "go", label="Restart adventure", icon=icon_svg("repeat") - ) - ui.update_sidebar("sidebar", show=False) - else: - if not chat.messages(): - await chat.append_message(welcome) - chat.update_user_input(value=starting_prompt()) - - async def pydantic_adventure_stream_generator(current_user_input: str): - async with chat_client.run_stream(current_user_input) as result: - async for chunk in result.stream_text(delta=True): - yield chunk - - @chat.on_user_submit - async def handle_user_submission(user_input: str): - n_msgs = len(chat.messages()) - current_input_for_llm = user_input - - if n_msgs == 1: - current_input_for_llm += " Please jump right into the story without any greetings or introductions." - elif n_msgs == 4: - current_input_for_llm += ". Time to nudge this story toward its conclusion. 
Give one more scenario (like creating a report, dashboard, or presentation) that will let me wrap this up successfully." - elif n_msgs == 5: - current_input_for_llm += ". Time to wrap this up. Conclude the story in the next step and offer to summarize the chat or create example scripts in R or Python. Consult your instructions for the correct format. If the user asks for code, remember that you'll need to create simulated data that matches the story." - - stream_generator = pydantic_adventure_stream_generator(current_input_for_llm) - await chat.append_message_stream(stream_generator) - - -app = App(app_ui, server) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md b/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md deleted file mode 100644 index 92f81cb..0000000 --- a/pkg-py/tests/playwright/chat/pydantic-ai/data-sci-adventure/prompt.md +++ /dev/null @@ -1,47 +0,0 @@ -You are an AI guide creating an interactive "choose-your-own adventure" experience for -data scientists. Your task is to present scenarios and choices that allow the user to -navigate through a series of real-world data science challenges and situations to -complete a successful data science project. - -Follow these steps to continue the adventure: - -1. Read the user's input carefully. - -2. Create 2-3 distinct choices for the user, considering: - - - Select appropriate emojis for each choice - - Determine which part of each choice should be marked as a clickable suggestion - -3. Present a short scenario (2-3 sentences) that builds upon the user's previous choice - or input. - -4. Offer 2-3 choices for the user to select from. Each choice should be formatted as - follows: - - - Begin with a single, relevant emoji - - Wrap the clickable part of the suggestion (the part that makes sense as a user response) in a span with class "suggestion" - - The entire choice, including the emoji and any additional context, should be on a single line - - Example format (do not use this content, create your own): - - * 📊 Analyze the data using regression analysis to identify trends - * 🧮 Apply clustering algorithms to segment the customer base - * 🔬 Conduct A/B testing on the new feature - -5. Ensure that your scenario and choices are creative, clear, and concise, while - remaining relevant to data science topics. - -6. Your goal is to guide the user to the end of a successful data science project that - is completed in 3-4 turns. - - Remember, the user will continue the adventure by selecting one of the choices you - present. Be prepared to build upon any of the options in future interactions. - -When the story reaches its conclusion, offer to summarize the conversation or to create -an example analysis using R or Python, but lean towards using Python. In Python, focus -on using polars and plotnine. In R, focus on tidyverse tools like dplyr and ggplot2. -Dashboards should be built with Shiny for Python or Shiny for R. Reports should be -written using Quarto. - -Remember to use the "suggestion submit" class for adventure steps and the "suggestion" -class for the summary choices. 
From 5e13404f44675b7674c18a330e5a1fb94821dc9e Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 09:38:47 +0530 Subject: [PATCH 14/31] update the example to make it into a shiny app --- .../{rag_example.py => app.py} | 80 ++++++++----------- 1 file changed, 35 insertions(+), 45 deletions(-) rename pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/{rag_example.py => app.py} (63%) diff --git a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py similarity index 63% rename from pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py rename to pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py index d7c7674..c94b68a 100644 --- a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/rag_example.py +++ b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py @@ -2,6 +2,7 @@ from chatlas import ChatOpenAI from llama_index.core import StorageContext, load_index_from_storage +from shiny.express import ui _ = os.environ.get("OPENAI_API_KEY") @@ -34,9 +35,7 @@ print("Dummy index created and saved.") -def retrieve_trusted_content( - query: str, top_k: int = 3 -): +def retrieve_trusted_content(query: str, top_k: int = 3): """ Retrieve relevant content from the bookstore's knowledge base. This acts as the "lookup" for our customer service assistant. @@ -54,7 +53,7 @@ def retrieve_trusted_content( return [f"{x.text}" for x in nodes] -chat = ChatOpenAI( +chat_client = ChatOpenAI( system_prompt=( "You are 'BookWorm Haven's Customer Service Assistant'. " "Your primary goal is to help customers with their queries about shipping, returns, " @@ -66,44 +65,35 @@ def retrieve_trusted_content( ) # This is where Chatlas learns to "look up" information when needed. -chat.register_tool(retrieve_trusted_content) - - -# Example Customer Interactions with the Assistant -print("\n--- BookWorm Haven Customer Service ---\n") - -# Example 1: Question that can be answered from the knowledge base (shipping policy) -customer_query_1 = "How long does standard shipping take?" -print(f"Customer: {customer_query_1}") -response_1 = chat.chat(customer_query_1) -print(f"Assistant: {response_1}\n") - -# Example 2: Another question answerable from the knowledge base (return policy) -customer_query_2 = "What's your return policy?" -print(f"Customer: {customer_query_2}") -response_2 = chat.chat(customer_query_2) -print(f"Assistant: {response_2}\n") - -# Example 3: Question about a specific book (information available) -customer_query_3 = "Tell me about 'The Midnight Library'." -print(f"Customer: {customer_query_3}") -response_3 = chat.chat(customer_query_3) -print(f"Assistant: {response_3}\n") - -# Example 4: Question with information not in the knowledge base -customer_query_4 = "Do you have any books about quantum physics for beginners?" -print(f"Customer: {customer_query_4}") -response_4 = chat.chat(customer_query_4) -print(f"Assistant: {response_4}\n") - -# Example 5: Combining information -customer_query_5 = "Is free shipping available and what payment methods do you accept?" -print(f"Customer: {customer_query_5}") -response_5 = chat.chat(customer_query_5) -print(f"Assistant: {response_5}\n") - -# Example 6: More nuanced query requiring retrieval -customer_query_6 = "I want to return a book, what should I do?" 
-print(f"Customer: {customer_query_6}") -response_6 = chat.chat(customer_query_6) -print(f"Assistant: {response_6}\n") +chat_client.register_tool(retrieve_trusted_content) + + +ui.page_opts( + title="BookWorm Haven Customer Service", + fillable=True, + fillable_mobile=True, +) + +chat = ui.Chat( + id="chat", + messages=[ + """ +Hello! I am BookWorm Haven's Customer Service Assistant. + +Here are some examples of what you can ask me: + +- How long does standard shipping take? +- What is your return policy? +- Can you tell me about 'The Midnight Library'? + + """ + ], +) +chat.ui() + + +# Generate a response when the user submits a message +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = await chat_client.stream_async(user_input) + await chat.append_message_stream(response) From 8128c0342e062f56463fb7d99f50bfaf22abc619 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 11:13:04 +0530 Subject: [PATCH 15/31] use context for maintaining state --- .../playwright/chat/llama-index/basic/app.py | 33 +++++++++++-------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py index a0f78d9..c364bbe 100644 --- a/pkg-py/tests/playwright/chat/llama-index/basic/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py @@ -1,9 +1,9 @@ from dotenv import load_dotenv -from llama_index.core.llms import ChatMessage +from llama_index.core.agent.workflow import FunctionAgent +from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI from shiny.express import ui -# Load environment variables from .env file _ = load_dotenv() llm = OpenAI( @@ -16,31 +16,38 @@ fillable_mobile=True, ) +agent = FunctionAgent( + tools=[], + llm=llm, + system_prompt="You are a pirate with a colorful personality.", +) + +ctx = Context(agent) chat = ui.Chat( id="chat", messages=[ - {"role": "system", "content": "You are a pirate with a colorful personality."}, - {"role": "user", "content": "What is your name, pirate?"}, { "role": "assistant", - "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas!", + "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! 
Ask me anything, matey!", }, ], ) chat.ui() -async def get_response_tokens(conversation: list[ChatMessage]): - response_stream = await llm.astream_chat(conversation) - async for r in response_stream: - yield r.delta +async def get_response_from_agent(user_message: str): + response = await agent.run(user_msg=user_message, ctx=ctx) + return str(response) @chat.on_user_submit async def handle_user_input(): - conversation = [ - ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages() - ] + latest_messages = chat.messages() + latest_user_message = latest_messages[-1]["content"] + response = await get_response_from_agent(latest_user_message) + + async def stream_response(): + yield response - await chat.append_message_stream(get_response_tokens(conversation)) + await chat.append_message_stream(stream_response()) From dee6dcfedf53184c68d4897afb673e287a7ffed5 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 11:50:53 +0530 Subject: [PATCH 16/31] maintain context across the conversation --- .../playwright/chat/llm_package/basic/app.py | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llm_package/basic/app.py b/pkg-py/tests/playwright/chat/llm_package/basic/app.py index c7ae83d..d5da599 100644 --- a/pkg-py/tests/playwright/chat/llm_package/basic/app.py +++ b/pkg-py/tests/playwright/chat/llm_package/basic/app.py @@ -5,32 +5,36 @@ # Load environment variables from .env file _ = load_dotenv() - model = llm.get_model("gpt-4o-mini") - - model.system_prompt = "You are a helpful assistant." +conversation = model.conversation() + ui.page_opts( title="Hello LLM Chat", fillable=True, fillable_mobile=True, ) - chat = ui.Chat( id="chat", - messages=["Hello! I am a bot using `llm` package with OpenAI. How can I help?"], + messages=[ + "Hello! I am a bot using `llm` package with OpenAI. How can I help?" 
+ ], ) chat.ui() -@chat.on_user_submit -async def handle_user_input(user_input: str): - response = model.prompt(user_input, stream=True) +def register_handlers(conversation): + @chat.on_user_submit + async def handle_user_input(user_input: str): + response = conversation.prompt(user_input, stream=True) + + async def stream_generator(): + for chunk in response: + yield chunk + + await chat.append_message_stream(stream_generator()) - async def stream_generator(): - for chunk in response: - yield chunk - await chat.append_message_stream(stream_generator()) +register_handlers(conversation) From 2ac6b95b06d66a5cb1ae76996b347e6619e7a8ef Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 13:30:46 +0530 Subject: [PATCH 17/31] make it more langchain specific --- .../chat/langchain/tool_calling/app.py | 81 ++++++++++--------- pyproject.toml | 3 +- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py index 292f5d7..575cc2f 100644 --- a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -2,7 +2,9 @@ from datetime import datetime from dotenv import load_dotenv -from langchain_core.messages import HumanMessage +from langchain.agents import AgentExecutor, create_openai_tools_agent +from langchain_core.messages import AIMessage, HumanMessage +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.tools import tool from langchain_openai import ChatOpenAI from shiny.express import ui @@ -23,19 +25,35 @@ def get_current_date() -> str: @tool -def get_current_datetime() -> str: - """Get the current date and time in a readable format.""" - return datetime.now().strftime("%Y-%m-%d %H:%M:%S") +def get_current_weather(city: str) -> str: + """Get the current weather for a given city.""" + return f"The current weather in {city} is sunny with a temperature of 25°C." -tools = [get_current_time, get_current_date, get_current_datetime] +@tool +def calculator(expression: str) -> str: + """Evaluate mathematical expressions""" + return str(eval(expression)) + + +tools = [get_current_time, get_current_date, calculator, get_current_weather] -tool_registry = {tool.name: tool for tool in tools} +prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant"), + MessagesPlaceholder("chat_history", optional=True), + ("human", "{input}"), + MessagesPlaceholder("agent_scratchpad"), + ] +) -chat_client = ChatOpenAI( +llm = ChatOpenAI( api_key=os.environ.get("OPENAI_API_KEY"), - model="gpt-4o", -).bind_tools(tools) + model="gpt-4.1-nano-2025-04-14", +) + +agent = create_openai_tools_agent(llm, tools, prompt) +agent_executor = AgentExecutor(agent=agent, tools=tools) ui.page_opts( title="Hello LangChain Chat Models with Tools", @@ -46,7 +64,7 @@ def get_current_datetime() -> str: chat = ui.Chat( id="chat", messages=[ - "Hello! How can I help you today? I can tell you the current time, date, or both!" + "Hello! I can help with time, date, calculator and other questions!" 
], ) chat.ui() @@ -54,34 +72,23 @@ def get_current_datetime() -> str: @chat.on_user_submit async def handle_user_input(user_input: str): - messages = [HumanMessage(content=user_input)] + def convert_to_langchain_messages(messages): + return [ + HumanMessage(content=msg["content"]) + if msg["role"] == "user" + else AIMessage(content=msg["content"]) + for msg in messages + if msg["role"] in ["user", "assistant"] + ] + + current_messages = chat.messages()[:-1] + langchain_history = convert_to_langchain_messages(current_messages) async def stream_response(): - accumulated_tool_calls = [] - - async for chunk in chat_client.astream(messages): - tool_calls = getattr(chunk, "tool_calls", None) - if tool_calls: - accumulated_tool_calls.extend(tool_calls) - - if chunk.content: - content = chunk.content - if isinstance(content, str): - yield content - elif isinstance(content, list): - for part in content: - if isinstance(part, str): - yield part - - for tool_call in accumulated_tool_calls: - tool_name = tool_call.get("name", "") - if not tool_name: - continue - - if tool_name in tool_registry: - result = tool_registry[tool_name].invoke({}) - yield f"\n\n🔧 {tool_name}: {result}" - else: - yield f"\n\n❌ Unknown tool: {tool_name}" + async for chunk in agent_executor.astream( + {"input": user_input, "chat_history": langchain_history} + ): + if chunk.get("output"): + yield chunk["output"] await chat.append_message_stream(stream_response()) diff --git a/pyproject.toml b/pyproject.toml index cc2929d..2bd76d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,8 @@ test = [ "python-dotenv", "llm", "llama-index", - "langchain" + "langchain", + "langchain-openai", ] [tool.uv] From b656214eb692a3bf7dfdff7430a0e87e1b516a30 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 13:48:47 +0530 Subject: [PATCH 18/31] preserve context within the chat conversation --- .../playwright/chat/langchain/basic/app.py | 56 ++++++++++++++++--- 1 file changed, 48 insertions(+), 8 deletions(-) diff --git a/pkg-py/tests/playwright/chat/langchain/basic/app.py b/pkg-py/tests/playwright/chat/langchain/basic/app.py index 682ba86..e1f3035 100644 --- a/pkg-py/tests/playwright/chat/langchain/basic/app.py +++ b/pkg-py/tests/playwright/chat/langchain/basic/app.py @@ -1,34 +1,74 @@ import os - from dotenv import load_dotenv + +from langchain_core.chat_history import InMemoryChatMessageHistory +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.runnables.history import RunnableWithMessageHistory from langchain_openai import ChatOpenAI from shiny.express import ui _ = load_dotenv() -chat_client = ChatOpenAI( + +model = ChatOpenAI( api_key=os.environ.get("OPENAI_API_KEY"), - model="gpt-4o", + model="gpt-4.1-nano-2025-04-14", +) + +prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are a helpful assistant. You answer in a friendly and concise manner.", + ), + MessagesPlaceholder(variable_name="history"), + ("human", "{input}"), + ] +) + +store = {} + + +def get_session_history(session_id: str): + if session_id not in store: + store[session_id] = InMemoryChatMessageHistory() + return store[session_id] + + +chain_with_history = RunnableWithMessageHistory( + prompt | model, + get_session_history, + input_messages_key="input", + history_messages_key="history", ) ui.page_opts( - title="Hello LangChain Chat Models", + title="Shiny Chat with LangChain History", fillable=True, fillable_mobile=True, ) chat = ui.Chat( id="chat", - messages=["Hello! 
How can I help you today?"], + messages=[ + { + "content": "Hello! I'm a chatbot that can remember our conversation. How can I help you today?", + "role": "assistant", + } + ], ) chat.ui() @chat.on_user_submit async def handle_user_input(user_input: str): - response = chat_client.astream(user_input) + config = {"configurable": {"session_id": "shiny_session_1"}} + response_stream = chain_with_history.astream( + {"input": user_input}, + config=config, + ) async def stream_wrapper(): - async for item in response: - yield item.content + async for chunk in response_stream: + yield chunk.content await chat.append_message_stream(stream_wrapper()) From 6cf3494616c5c23031e0b89b26bab3b71b835c88 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 14:11:12 +0530 Subject: [PATCH 19/31] allow streaming now in tool calling example --- .../chat/langchain/tool_calling/app.py | 47 ++++++++++++------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py index 575cc2f..ec65ba0 100644 --- a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -3,8 +3,9 @@ from dotenv import load_dotenv from langchain.agents import AgentExecutor, create_openai_tools_agent -from langchain_core.messages import AIMessage, HumanMessage +from langchain_core.chat_history import InMemoryChatMessageHistory from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.runnables.history import RunnableWithMessageHistory from langchain_core.tools import tool from langchain_openai import ChatOpenAI from shiny.express import ui @@ -55,8 +56,19 @@ def calculator(expression: str) -> str: agent = create_openai_tools_agent(llm, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) + store[session_id] = InMemoryChatMessageHistory() + return store[session_id] + + +agent_with_chat_history = RunnableWithMessageHistory( + agent_executor, + get_session_history, + input_messages_key="input", + history_messages_key="chat_history", +) + ui.page_opts( - title="Hello LangChain Chat Models with Tools", + title="Shiny Chat with LangChain Agent", fillable=True, fillable_mobile=True, ) @@ -64,7 +76,10 @@ def calculator(expression: str) -> str: chat = ui.Chat( id="chat", messages=[ - "Hello! I can help with time, date, calculator and other questions!" + { + "content": "Hello! I'm a chatbot with tools. I can get the time, date, weather, or do calculations. I'll also remember our conversation. How can I help?", + "role": "assistant", + } ], ) chat.ui() @@ -72,23 +87,19 @@ def calculator(expression: str) -> str: @chat.on_user_submit async def handle_user_input(user_input: str): - def convert_to_langchain_messages(messages): - return [ - HumanMessage(content=msg["content"]) - if msg["role"] == "user" - else AIMessage(content=msg["content"]) - for msg in messages - if msg["role"] in ["user", "assistant"] - ] - - current_messages = chat.messages()[:-1] - langchain_history = convert_to_langchain_messages(current_messages) + """ + Handles user input by streaming the agent's response. 
+ """ + config = {"configurable": {"session_id": "shiny_session_tools_1"}} async def stream_response(): - async for chunk in agent_executor.astream( - {"input": user_input, "chat_history": langchain_history} + async for event in agent_with_chat_history.astream_events( + {"input": user_input}, config=config, version="v1" ): - if chunk.get("output"): - yield chunk["output"] + kind = event["event"] + if kind == "on_chat_model_stream": + content = event["data"]["chunk"].content + if content: + yield content await chat.append_message_stream(stream_response()) From f9c38c817f825bef85c5312cf0513397804f25fd Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 14:12:48 +0530 Subject: [PATCH 20/31] Add session-based chat history retrieval function Introduces a `get_session_history` function that retrieves or initializes chat history for a given session ID using an in-memory store. This supports session-based chat management in the application. --- .../tests/playwright/chat/langchain/tool_calling/app.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py index ec65ba0..075e4c8 100644 --- a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -56,6 +56,15 @@ def calculator(expression: str) -> str: agent = create_openai_tools_agent(llm, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) +store = {} + + +def get_session_history(session_id: str): + """ + Retrieves the chat history for a given session ID. + If no history exists, a new one is created. + """ + if session_id not in store: store[session_id] = InMemoryChatMessageHistory() return store[session_id] From b00f8a6fc6cf943a3e26874150055c6880b6a706 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 14:16:33 +0530 Subject: [PATCH 21/31] remove workout planner app example for pydantic ai --- .../chat/pydantic-ai/workout-plan/app.py | 102 ------------------ 1 file changed, 102 deletions(-) delete mode 100644 pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py deleted file mode 100644 index 52d6fa0..0000000 --- a/pkg-py/tests/playwright/chat/pydantic-ai/workout-plan/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import os - -from dotenv import load_dotenv -from faicons import icon_svg -from pydantic_ai import Agent -from shiny import reactive -from shiny.express import input, render, ui - -_ = load_dotenv() - -# Create a Pydantic AI agent -chat_client = Agent( - "openai:o4-mini", - api_key=os.environ.get("OPENAI_API_KEY"), - system_prompt=""" - You are a helpful AI fitness coach. - Give detailed workout plans to users based on their fitness goals and experience level. - Before getting into details, give a brief introduction to the workout plan. - Keep the overall tone encouraging and professional yet friendly. - Generate the response in Markdown format and avoid using h1, h2, or h3. 
- """, -) - -# Shiny page options -ui.page_opts(title="Personalized Workout Plan Generator") - -# Sidebar UI for user inputs -with ui.sidebar(open={"mobile": "always-above"}): - ui.input_select( - "goal", - "Fitness Goal", - ["Strength", "Cardio", "Flexibility", "General Fitness"], - ) - ui.input_selectize( - "equipment", - "Available Equipment", - ["Dumbbells", "Barbell", "Resistance Bands", "Bodyweight"], - multiple=True, - selected=["Bodyweight", "Barbell"], - ) - ui.input_slider("experience", "Experience Level", min=1, max=10, value=5) - ui.input_slider("duration", "Duration (mins)", min=15, max=90, step=5, value=45) - ui.input_select( - "daysPerWeek", - "Days per Week", - [str(i) for i in range(1, 8)], - selected="3", - ) - ui.input_task_button("generate", "Get Workout", icon=icon_svg("person-running")) - - # Download button UI - @render.express - def download_ui(): - plan = workout_stream.latest_stream.result() - - @render.download(filename="workout_plan.md", label="Download Workout") - def download(): - yield plan - - -# Create a Markdown stream -workout_stream = ui.MarkdownStream("workout_stream") - -# Display the Markdown stream in the main content area -workout_stream.ui( - content=( - "Hi there! 👋 I'm your AI fitness coach. 💪" - "\n\n" - "Fill out the form in the sidebar to get started. 📝 🏋️‍♂ ️" - ) -) - - -# Async generator function to stream the response from the Pydantic AI agent -async def pydantic_workout_stream_generator(user_prompt: str): - """ - An async generator that streams text chunks from the Pydantic AI agent. - """ - # chat_client is defined globally - async with chat_client.run_stream(user_prompt) as result: - async for chunk in result.stream_text(delta=True): - yield chunk - - -# Reactive effect to generate workout plan when the button is clicked -@reactive.effect -@reactive.event(input.generate) -async def _(): - # Construct the prompt based on user inputs - prompt = f""" - Generate a brief {input.duration()}-minute workout plan for a {input.goal()} fitness goal. - On a scale of 1-10, I have a level {input.experience()} experience, - works out {input.daysPerWeek()} days per week, and have access to: - {", ".join(input.equipment()) if input.equipment() else "no equipment"}. - Format the response in Markdown. 
- """ - - # Generate the stream using the pydantic_workout_stream_generator - stream_generator = pydantic_workout_stream_generator(prompt) - - # Pass the async generator to the MarkdownStream - await workout_stream.stream(stream_generator) From 8ff507ac8c84ccd42b70d1b9d77f62aef61a7949 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 14:45:02 +0530 Subject: [PATCH 22/31] make the basic llama-index have streaming responses --- .../playwright/chat/llama-index/basic/app.py | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py index c364bbe..c4ff075 100644 --- a/pkg-py/tests/playwright/chat/llama-index/basic/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py @@ -1,5 +1,5 @@ from dotenv import load_dotenv -from llama_index.core.agent.workflow import FunctionAgent +from llama_index.core.agent.workflow import FunctionAgent, AgentStream from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI from shiny.express import ui @@ -7,7 +7,7 @@ _ = load_dotenv() llm = OpenAI( - model="gpt-4o-mini", + model="gpt-4.1-nano-2025-04-14", ) ui.page_opts( @@ -36,18 +36,25 @@ chat.ui() -async def get_response_from_agent(user_message: str): - response = await agent.run(user_msg=user_message, ctx=ctx) - return str(response) +async def stream_response_from_agent(user_message: str, context: Context): + handler = agent.run(user_msg=user_message, ctx=context) + + async for event in handler.stream_events(): + if isinstance(event, AgentStream): + if event.delta: + yield event.delta + + await handler + @chat.on_user_submit async def handle_user_input(): latest_messages = chat.messages() latest_user_message = latest_messages[-1]["content"] - response = await get_response_from_agent(latest_user_message) - async def stream_response(): - yield response + async def stream_generator(): + async for chunk in stream_response_from_agent(latest_user_message, ctx): + yield chunk - await chat.append_message_stream(stream_response()) + await chat.append_message_stream(stream_generator()) From 61e0269d031009b523d923b2cedb24b295ec7462 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 14:56:56 +0530 Subject: [PATCH 23/31] Allow maintaining context and streaming for tool calling example --- .../chat/llama-index/tool_calling/app.py | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py index 63b862e..6877921 100644 --- a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py @@ -8,7 +8,6 @@ from llama_index.llms.openai import OpenAI from shiny.express import ui -# Load environment variables from .env file _ = load_dotenv() @@ -44,9 +43,11 @@ def get_current_date(timezone: str = "UTC") -> str: date_tool = FunctionTool.from_defaults(fn=get_current_date) llm = OpenAI( - model="gpt-4o-mini", + model="gpt-4.1-nano-2025-04-14", ) +agent = ReActAgent.from_tools([time_tool, date_tool], llm=llm, verbose=True) + ui.page_opts( title="Shiny Chat with LlamaIndex Tool Calling", fillable=True, @@ -71,21 +72,20 @@ def get_current_date(timezone: str = "UTC") -> str: chat.ui() -async def get_response_tokens(conversation: list[ChatMessage]): - - agent = ReActAgent.from_tools([time_tool, date_tool], llm=llm, 
verbose=True) - - last_message = conversation[-1].content if conversation else "" - - response_stream = await agent.astream_chat(last_message) - async for token in response_stream.async_response_gen(): - yield token - - @chat.on_user_submit async def handle_user_input(): - conversation = [ + messages = [ ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages() ] - await chat.append_message_stream(get_response_tokens(conversation)) + last_message = messages[-1] + chat_history = messages[:-1] + + async def stream_generator(): + response_stream = await agent.astream_chat( + message=last_message.content, chat_history=chat_history + ) + async for token in response_stream.async_response_gen(): + yield token + + await chat.append_message_stream(stream_generator()) From 527e985539b778229131572910d0c37b205e042b Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 15:26:10 +0530 Subject: [PATCH 24/31] remove multiple output structures and allow streaming --- .../chat/llama-index/structured_output/app.py | 146 +++++++++--------- 1 file changed, 75 insertions(+), 71 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py index 131cd7a..3014fc5 100644 --- a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py @@ -1,113 +1,117 @@ -import json +from datetime import datetime from typing import List, Optional from dotenv import load_dotenv -from llama_index.core.llms import ChatMessage -from llama_index.core.program import LLMTextCompletionProgram +from llama_index.core.agent.workflow import AgentStream, FunctionAgent +from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI from pydantic import BaseModel, Field from shiny.express import ui -# Load environment variables from .env file _ = load_dotenv() -# Define structured output models -class PirateResponse(BaseModel): - """A pirate's response with personality and structured data.""" +class AnalysisResponse(BaseModel): + """A structured analysis response for complex queries.""" - message: str = Field(description="The pirate's main response message") - mood: str = Field( - description="The pirate's current mood (e.g., jolly, grumpy, excited)" + summary: str = Field(description="Executive summary of the analysis") + detailed_analysis: str = Field(description="Detailed analysis content") + methodology: Optional[str] = Field( + description="Methodology used for analysis" ) - treasure_count: Optional[int] = Field( - description="Number of treasures mentioned, if any" + conclusions: List[str] = Field(description="Key conclusions drawn") + recommendations: Optional[List[str]] = Field( + description="Actionable recommendations" ) - nautical_terms: List[str] = Field(description="List of nautical/pirate terms used") -class CrewMember(BaseModel): - """Information about a pirate crew member.""" +_ = AnalysisResponse.model_rebuild() - name: str = Field(description="The crew member's name") - role: str = Field(description="Their role on the ship") - experience_years: int = Field(description="Years of sailing experience") - - -class PirateStory(BaseModel): - """A structured pirate story response.""" - - title: str = Field(description="Title of the pirate story") - story: str = Field(description="The main story content") - characters: List[CrewMember] = Field(description="Characters in the story") - moral: 
Optional[str] = Field(description="The moral of the story, if any") - - -_ = PirateResponse.model_rebuild() -_ = CrewMember.model_rebuild() -_ = PirateStory.model_rebuild() - - -llm = OpenAI( - model="gpt-4o-mini", -) +llm = OpenAI(model="gpt-4.1-nano-2025-04-14") ui.page_opts( - title="Shiny Chat with LlamaIndex Structured Output", + title="Analysis Assistant", fillable=True, fillable_mobile=True, ) +agent = FunctionAgent( + tools=[], + llm=llm, + system_prompt="""You are an analytical assistant that provides thorough analysis. + Be clear, concise, and analytical in your responses.""", +) + +ctx = Context(agent) + +if not hasattr(ctx, "conversation_history"): + ctx.conversation_history = [] + chat = ui.Chat( id="chat", messages=[ - {"role": "system", "content": "You are a pirate with a colorful personality."}, - {"role": "user", "content": "What is your name, pirate?"}, { "role": "assistant", - "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! Ask me about a short story or a tale, and I'll spin you a yarn full of adventure and treasure!", + "content": "Hello! I'm your analysis assistant. I can help you analyze topics, data, and situations. What would you like me to analyze?", }, ], ) chat.ui() -async def get_response_tokens(conversation: list[ChatMessage]): - last_message = ( - conversation[-1].content.lower() - if conversation and conversation[-1].content - else "" +async def stream_response_from_agent(user_message: str, context: Context): + context.conversation_history.append( + { + "role": "user", + "content": user_message, + "timestamp": datetime.now().isoformat(), + } ) - if "story" in last_message or "tale" in last_message: - program = LLMTextCompletionProgram.from_defaults( - output_cls=PirateStory, - llm=llm, - prompt_template_str=( - "You are a pirate storyteller. Based on this conversation: {conversation}\n" - "Create a pirate story with characters and a moral." - ), - ) - response = await program.acall(conversation=str(conversation)) - else: - program = LLMTextCompletionProgram.from_defaults( - output_cls=PirateResponse, - llm=llm, - prompt_template_str=( - "You are a pirate with a colorful personality. " - "Based on this conversation: {conversation}\n" - "Respond in character and include relevant nautical terms." - ), + recent_context = "" + if len(context.conversation_history) > 1: + recent_messages = context.conversation_history[-3:] + recent_context = "\n".join( + [f"{msg['role']}: {msg['content']}" for msg in recent_messages] ) - response = await program.acall(conversation=str(conversation)) - yield f"```json\n{json.dumps(response.dict(), indent=2)}\n```" + enhanced_message = f""" + Context from recent conversation: + {recent_context} + + Current request: {user_message} + + Please provide a clear analytical response. 
+    """
+
+    handler = agent.run(user_msg=enhanced_message, ctx=context)
+    response_content = ""
+
+    async for event in handler.stream_events():
+        if isinstance(event, AgentStream):
+            if event.delta:
+                response_content += event.delta
+                yield event.delta
+
+    await handler
+
+    context.conversation_history.append(
+        {
+            "role": "assistant",
+            "content": response_content,
+            "timestamp": datetime.now().isoformat(),
+        }
+    )
 
 
 @chat.on_user_submit
 async def handle_user_input():
-    conversation = [
-        ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages()
-    ]
+    """Handle user input and stream response."""
+    latest_messages = chat.messages()
+    latest_user_message = latest_messages[-1]["content"]
+
+    async def stream_generator():
+        async for chunk in stream_response_from_agent(latest_user_message, ctx):
+            yield chunk
 
-    await chat.append_message_stream(get_response_tokens(conversation))
+    await chat.append_message_stream(stream_generator())
 

From a3e72611fefce988860ceff9dcdb3bb1fda44d67 Mon Sep 17 00:00:00 2001
From: Karan Gathani
Date: Tue, 1 Jul 2025 15:28:06 +0530
Subject: [PATCH 25/31] make the app even more streamlined

---
 .../chat/llama-index/structured_output/app.py | 38 +------------------
 1 file changed, 2 insertions(+), 36 deletions(-)

diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py
index 3014fc5..a3527b3 100644
--- a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py
+++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py
@@ -60,49 +60,15 @@ class AnalysisResponse(BaseModel):
 
 
 async def stream_response_from_agent(user_message: str, context: Context):
-    context.conversation_history.append(
-        {
-            "role": "user",
-            "content": user_message,
-            "timestamp": datetime.now().isoformat(),
-        }
-    )
-
-    recent_context = ""
-    if len(context.conversation_history) > 1:
-        recent_messages = context.conversation_history[-3:]
-        recent_context = "\n".join(
-            [f"{msg['role']}: {msg['content']}" for msg in recent_messages]
-        )
-
-    enhanced_message = f"""
-    Context from recent conversation:
-    {recent_context}
-
-    Current request: {user_message}
-
-    Please provide a clear analytical response. 
- """ - - handler = agent.run(user_msg=enhanced_message, ctx=context) - response_content = "" - + handler = agent.run(user_msg=user_message, ctx=context) + async for event in handler.stream_events(): if isinstance(event, AgentStream): if event.delta: - response_content += event.delta yield event.delta await handler - context.conversation_history.append( - { - "role": "assistant", - "content": response_content, - "timestamp": datetime.now().isoformat(), - } - ) - @chat.on_user_submit async def handle_user_input(): From 7093e2b5b5216f1500d6bd51526a7c26d51b87c0 Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 1 Jul 2025 15:44:02 +0530 Subject: [PATCH 26/31] use a streaming and stateful chatbot for pydantic ai basic app --- .../playwright/chat/pydantic-ai/basic/app.py | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py index 7a58c95..838c421 100644 --- a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py +++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py @@ -1,25 +1,25 @@ -import os +from typing import List from dotenv import load_dotenv from pydantic_ai import Agent +from pydantic_ai.messages import ModelMessage +from shiny import reactive from shiny.express import ui _ = load_dotenv() chat_client = Agent( - "openai:o4-mini", + "openai:gpt-4.1-nano-2025-04-14", system_prompt="You are a helpful assistant.", ) +conversation_history = reactive.value(list[ModelMessage]([])) -# Set some Shiny page options ui.page_opts( title="Hello OpenAI Chat", fillable=True, fillable_mobile=True, ) - -# Create and display a Shiny chat component chat = ui.Chat( id="chat", messages=["Hello! How can I help you today?"], @@ -27,15 +27,20 @@ chat.ui() -# Generate a response when the user submits a message @chat.on_user_submit async def handle_user_input(user_input: str): - stream = pydantic_stream_generator(user_input) + current_history = conversation_history.get() + stream = pydantic_stream_generator(user_input, current_history) await chat.append_message_stream(stream) -# An async generator function to stream the response from the Pydantic AI agent -async def pydantic_stream_generator(user_input: str): - async with chat_client.run_stream(user_input) as result: +async def pydantic_stream_generator( + user_input: str, current_history: List[ModelMessage] +): + message_history = current_history if current_history else None + async with chat_client.run_stream( + user_input, message_history=message_history + ) as result: async for chunk in result.stream_text(delta=True): yield chunk + conversation_history.set(result.all_messages()) From 622c82e7b8a3f5d73ed6da45c8430ae65672df90 Mon Sep 17 00:00:00 2001 From: Karan Date: Tue, 15 Jul 2025 08:46:06 +0530 Subject: [PATCH 27/31] Update pkg-py/tests/playwright/chat/llama-index/basic/app.py Co-authored-by: Carson Sievert --- pkg-py/tests/playwright/chat/llama-index/basic/app.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py index c4ff075..e78dc71 100644 --- a/pkg-py/tests/playwright/chat/llama-index/basic/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py @@ -49,12 +49,10 @@ async def stream_response_from_agent(user_message: str, context: Context): @chat.on_user_submit -async def handle_user_input(): - latest_messages = chat.messages() - latest_user_message = 
latest_messages[-1]["content"] +async def handle_user_input(user_input: str): async def stream_generator(): - async for chunk in stream_response_from_agent(latest_user_message, ctx): + async for chunk in stream_response_from_agent(user_input, ctx): yield chunk await chat.append_message_stream(stream_generator()) From 9877d83964d4c3d187f7ce7bf5abbbbe477be205 Mon Sep 17 00:00:00 2001 From: Karan Date: Tue, 15 Jul 2025 08:46:17 +0530 Subject: [PATCH 28/31] Update pkg-py/tests/playwright/chat/llm_package/basic/app.py Co-authored-by: Carson Sievert --- .../playwright/chat/llm_package/basic/app.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llm_package/basic/app.py b/pkg-py/tests/playwright/chat/llm_package/basic/app.py index d5da599..4fabb21 100644 --- a/pkg-py/tests/playwright/chat/llm_package/basic/app.py +++ b/pkg-py/tests/playwright/chat/llm_package/basic/app.py @@ -25,16 +25,12 @@ chat.ui() -def register_handlers(conversation): - @chat.on_user_submit - async def handle_user_input(user_input: str): - response = conversation.prompt(user_input, stream=True) +@chat.on_user_submit +async def handle_user_input(user_input: str): + response = conversation.prompt(user_input, stream=True) - async def stream_generator(): - for chunk in response: - yield chunk + async def stream_generator(): + for chunk in response: + yield chunk - await chat.append_message_stream(stream_generator()) - - -register_handlers(conversation) + await chat.append_message_stream(stream_generator()) From c05c0c4e1f12e83712831ae6674c053a2db086d1 Mon Sep 17 00:00:00 2001 From: Karan Date: Tue, 15 Jul 2025 08:47:16 +0530 Subject: [PATCH 29/31] Update pkg-py/tests/playwright/chat/llama-index/structured_output/app.py Co-authored-by: Carson Sievert --- .../playwright/chat/llama-index/structured_output/app.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py index a3527b3..abb74ed 100644 --- a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py @@ -71,13 +71,10 @@ async def stream_response_from_agent(user_message: str, context: Context): @chat.on_user_submit -async def handle_user_input(): - """Handle user input and stream response.""" - latest_messages = chat.messages() - latest_user_message = latest_messages[-1]["content"] +async def handle_user_input(user_input: str): async def stream_generator(): - async for chunk in stream_response_from_agent(latest_user_message, ctx): + async for chunk in stream_response_from_agent(user_input, ctx): yield chunk await chat.append_message_stream(stream_generator()) From 539e207e93fe54c6ea91150f0cefe25b2529d04b Mon Sep 17 00:00:00 2001 From: Karan Gathani Date: Tue, 15 Jul 2025 10:07:47 +0530 Subject: [PATCH 30/31] Refactor chat UI initialization and improve assistant prompts Standardizes chat UI initialization by moving message setup into chat.ui() calls and enhances assistant messages with suggestions and clearer instructions across multiple Playwright chat test apps. Also updates imports and agent usage for consistency and clarity. 
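For reference, the pattern applied to each app looks roughly like this (a
minimal sketch; the component id and greeting text are illustrative):

    from shiny.express import ui

    # The constructor now only creates the chat component...
    chat = ui.Chat(id="chat")

    # ...and the startup message moves into the chat.ui() call as a
    # role-tagged dict rather than a bare string.
    chat.ui(
        messages=[
            {
                "role": "assistant",
                "content": "Hello! How can I help you today?",
            }
        ],
    )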
--- .../playwright/chat/langchain/basic/app.py | 5 +- .../chat/langchain/structured_output/app.py | 20 ++++++-- .../chat/langchain/tool_calling/app.py | 3 +- .../playwright/chat/llama-index/basic/app.py | 7 ++- .../chat/llama-index/rag_with_chatlas/app.py | 4 +- .../chat/llama-index/structured_output/app.py | 16 ++++-- .../chat/llama-index/tool_calling/app.py | 49 ++++++++++--------- .../playwright/chat/llm_package/basic/app.py | 10 ++-- .../chat/llm_package/structured_output/app.py | 20 ++++++-- .../{tool_calling_ex => tool_calling}/app.py | 11 +++-- .../playwright/chat/pydantic-ai/basic/app.py | 10 +++- .../chat/pydantic-ai/structured_output/app.py | 22 ++++++--- .../chat/pydantic-ai/tool_calling/app.py | 10 +++- 13 files changed, 127 insertions(+), 60 deletions(-) rename pkg-py/tests/playwright/chat/llm_package/{tool_calling_ex => tool_calling}/app.py (79%) diff --git a/pkg-py/tests/playwright/chat/langchain/basic/app.py b/pkg-py/tests/playwright/chat/langchain/basic/app.py index e1f3035..3bb49f1 100644 --- a/pkg-py/tests/playwright/chat/langchain/basic/app.py +++ b/pkg-py/tests/playwright/chat/langchain/basic/app.py @@ -1,6 +1,6 @@ import os -from dotenv import load_dotenv +from dotenv import load_dotenv from langchain_core.chat_history import InMemoryChatMessageHistory from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.runnables.history import RunnableWithMessageHistory @@ -49,6 +49,8 @@ def get_session_history(session_id: str): chat = ui.Chat( id="chat", +) +chat.ui( messages=[ { "content": "Hello! I'm a chatbot that can remember our conversation. How can I help you today?", @@ -56,7 +58,6 @@ def get_session_history(session_id: str): } ], ) -chat.ui() @chat.on_user_submit diff --git a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py index d3f59ca..d7d1137 100644 --- a/pkg-py/tests/playwright/chat/langchain/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/langchain/structured_output/app.py @@ -14,7 +14,9 @@ class Joke(BaseModel): setup: str = Field(description="The setup of the joke") punchline: str = Field(description="The punchline to the joke") - rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10") + rating: Optional[int] = Field( + description="How funny the joke is, from 1 to 10" + ) _ = Joke.model_rebuild() @@ -32,9 +34,21 @@ class Joke(BaseModel): chat = ui.Chat( id="chat", - messages=["Hello! How can I help you today?"], ) -chat.ui() +chat.ui( + messages=[ + { + "role": "assistant", + "content": """ +Hello! I'm your joke assistant. I can tell you jokes and rate them. +Here are some examples of what you can ask me: +- Tell me a joke about pirates. +- Can you tell me a funny joke about skeletons? +- Give me a joke about cats. + """, + } + ], +) @chat.on_user_submit diff --git a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py index 075e4c8..b7fa2cd 100644 --- a/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/langchain/tool_calling/app.py @@ -84,6 +84,8 @@ def get_session_history(session_id: str): chat = ui.Chat( id="chat", +) +chat.ui( messages=[ { "content": "Hello! I'm a chatbot with tools. I can get the time, date, weather, or do calculations. I'll also remember our conversation. 
How can I help?", @@ -91,7 +93,6 @@ def get_session_history(session_id: str): } ], ) -chat.ui() @chat.on_user_submit diff --git a/pkg-py/tests/playwright/chat/llama-index/basic/app.py b/pkg-py/tests/playwright/chat/llama-index/basic/app.py index e78dc71..873f2f4 100644 --- a/pkg-py/tests/playwright/chat/llama-index/basic/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/basic/app.py @@ -1,5 +1,5 @@ from dotenv import load_dotenv -from llama_index.core.agent.workflow import FunctionAgent, AgentStream +from llama_index.core.agent.workflow import AgentStream, FunctionAgent from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI from shiny.express import ui @@ -26,6 +26,8 @@ chat = ui.Chat( id="chat", +) +chat.ui( messages=[ { "role": "assistant", @@ -33,7 +35,6 @@ }, ], ) -chat.ui() async def stream_response_from_agent(user_message: str, context: Context): @@ -47,10 +48,8 @@ async def stream_response_from_agent(user_message: str, context: Context): await handler - @chat.on_user_submit async def handle_user_input(user_input: str): - async def stream_generator(): async for chunk in stream_response_from_agent(user_input, ctx): yield chunk diff --git a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py index c94b68a..3798ca7 100644 --- a/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/rag_with_chatlas/app.py @@ -1,10 +1,10 @@ -import os from chatlas import ChatOpenAI +from dotenv import load_dotenv from llama_index.core import StorageContext, load_index_from_storage from shiny.express import ui -_ = os.environ.get("OPENAI_API_KEY") +_ = load_dotenv() # Load the knowledge store (index) from disk diff --git a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py index abb74ed..a70e0da 100644 --- a/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/structured_output/app.py @@ -1,4 +1,3 @@ -from datetime import datetime from typing import List, Optional from dotenv import load_dotenv @@ -49,19 +48,27 @@ class AnalysisResponse(BaseModel): chat = ui.Chat( id="chat", +) +chat.ui( messages=[ { "role": "assistant", - "content": "Hello! I'm your analysis assistant. I can help you analyze topics, data, and situations. What would you like me to analyze?", + "content": """ +Hello! I'm your analysis assistant. I can help you analyze topics, data, and situations. +Here are some examples of what you can ask me: + +- Analyze the impact of remote work on productivity. +- Provide a detailed analysis of electric vehicle adoption. +- What are the key conclusions from recent climate change studies? 
+ """, }, ], ) -chat.ui() async def stream_response_from_agent(user_message: str, context: Context): handler = agent.run(user_msg=user_message, ctx=context) - + async for event in handler.stream_events(): if isinstance(event, AgentStream): if event.delta: @@ -72,7 +79,6 @@ async def stream_response_from_agent(user_message: str, context: Context): @chat.on_user_submit async def handle_user_input(user_input: str): - async def stream_generator(): async for chunk in stream_response_from_agent(user_input, ctx): yield chunk diff --git a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py index 6877921..09a764e 100644 --- a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py @@ -2,8 +2,8 @@ import pytz from dotenv import load_dotenv -from llama_index.core.agent import ReActAgent -from llama_index.core.llms import ChatMessage +from llama_index.core.agent.workflow import AgentStream, FunctionAgent +from llama_index.core.workflow import Context from llama_index.core.tools import FunctionTool from llama_index.llms.openai import OpenAI from shiny.express import ui @@ -46,46 +46,51 @@ def get_current_date(timezone: str = "UTC") -> str: model="gpt-4.1-nano-2025-04-14", ) -agent = ReActAgent.from_tools([time_tool, date_tool], llm=llm, verbose=True) - ui.page_opts( title="Shiny Chat with LlamaIndex Tool Calling", fillable=True, fillable_mobile=True, ) +agent = FunctionAgent( + tools=[time_tool, date_tool], + llm=llm, + system_prompt="You are a pirate with a colorful personality. You can tell people the time and date when they need to know when to set sail!", +) + +ctx = Context(agent) chat = ui.Chat( id="chat", +) +chat.ui( messages=[ - { - "role": "system", - "content": "You are a pirate with a colorful personality. You can help users get the current time and date using your tools when they ask.", - }, - {"role": "user", "content": "What is your name, pirate?"}, { "role": "assistant", "content": "Arrr, they call me Captain Cog, the chattiest pirate on the seven seas! 
I can also tell ye the time and date if ye need to know when to set sail!",
         },
     ],
 )
-chat.ui()
 
 
-@chat.on_user_submit
-async def handle_user_input():
-    messages = [
-        ChatMessage(role=msg["role"], content=msg["content"]) for msg in chat.messages()
-    ]
+async def stream_response_from_agent(user_message: str, context: Context):
+    """Stream response from agent using the context-based approach."""
+    handler = agent.run(user_msg=user_message, ctx=context)
+
+    async for event in handler.stream_events():
+        if isinstance(event, AgentStream):
+            if event.delta:
+                yield event.delta
+
+    await handler
 
-    last_message = messages[-1]
-    chat_history = messages[:-1]
+
+@chat.on_user_submit
+async def handle_user_input(user_input: str):
+    """Handle user input and stream response using context."""
 
     async def stream_generator():
-        response_stream = await agent.astream_chat(
-            message=last_message.content, chat_history=chat_history
-        )
-        async for token in response_stream.async_response_gen():
-            yield token
+        async for chunk in stream_response_from_agent(user_input, ctx):
+            yield chunk
 
     await chat.append_message_stream(stream_generator())
diff --git a/pkg-py/tests/playwright/chat/llm_package/basic/app.py b/pkg-py/tests/playwright/chat/llm_package/basic/app.py
index 4fabb21..b0285d3 100644
--- a/pkg-py/tests/playwright/chat/llm_package/basic/app.py
+++ b/pkg-py/tests/playwright/chat/llm_package/basic/app.py
@@ -18,11 +18,15 @@
 
 chat = ui.Chat(
     id="chat",
+)
+chat.ui(
     messages=[
-        "Hello! I am a bot using `llm` package with OpenAI. How can I help?"
-    ],
+        {
+            "content": "Hello! I’m a general-purpose assistant built with the `llm` package and OpenAI. Ask me anything, and I’ll do my best to help. How can I assist you today?",
+            "role": "assistant",
+        }
+    ]
 )
-chat.ui()
 
 
 @chat.on_user_submit
diff --git a/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py
index 6c20e16..e7c4b44 100644
--- a/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py
+++ b/pkg-py/tests/playwright/chat/llm_package/structured_output/app.py
@@ -1,8 +1,7 @@
 import llm
 from dotenv import load_dotenv
-from shiny.express import ui
 from pydantic import BaseModel
-
+from shiny.express import ui
 
 # Load environment variables from .env file
 _ = load_dotenv()
@@ -12,6 +11,7 @@ class Dog(BaseModel):
     name: str
     age: int
 
+
 model = llm.get_model("gpt-4o-mini")
 
 
@@ -26,9 +26,21 @@ class Dog(BaseModel):
 
 chat = ui.Chat(
     id="chat",
-    messages=["Hello! I am a bot using `llm` package with OpenAI. How can I help?"],
 )
-chat.ui()
+chat.ui(
+    messages=[
+        {
+            "content": """
+            Here are some examples of what you can ask me:
+
+- Tell me about a dog named Bella who is 3 years old.
+- I have a dog named Rocky, age 7.
+- Give me info about a puppy called Luna, 1 year old.
+            """,
+            "role": "assistant",
+        }
+    ]
+)
 
 
 @chat.on_user_submit
diff --git a/pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py b/pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py
similarity index 79%
rename from pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py
rename to pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py
index 6608de8..c704254 100644
--- a/pkg-py/tests/playwright/chat/llm_package/tool_calling_ex/app.py
+++ b/pkg-py/tests/playwright/chat/llm_package/tool_calling/app.py
@@ -25,14 +25,19 @@ def get_current_time() -> str:
 
 chat = ui.Chat(
     id="chat",
-    messages=["Hello! I am a bot using `llm` package with OpenAI that showcases tool calling. How can I help? Ask me for the current time or date."],
 )
-chat.ui()
+chat.ui(
+    messages=[
+        {
+            "content": "Hello! I'm a chatbot that can help you with the current time. How can I assist you today?",
+            "role": "assistant",
+        }
+    ],
+)
 
 
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-
     response = model.chain(user_input, tools=[get_current_time])
 
     # Stream the response to the chat window
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
index 838c421..3eed3df 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/basic/app.py
@@ -22,9 +22,15 @@
 
 chat = ui.Chat(
     id="chat",
-    messages=["Hello! How can I help you today?"],
 )
-chat.ui()
+chat.ui(
+    messages=[
+        {
+            "content": "Hello! I’m your AI assistant. You can ask me questions, get explanations, or help with tasks like brainstorming, summarizing, or coding. How can I assist you today?",
+            "role": "assistant",
+        }
+    ]
+)
 
 
 @chat.on_user_submit
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
index b99150c..d9b6d96 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/structured_output/app.py
@@ -1,5 +1,3 @@
-import os
-
 from dotenv import load_dotenv
 from pydantic import BaseModel
 from pydantic_ai import Agent
@@ -31,16 +29,26 @@ class CityLocation(BaseModel):
 # Create and display a Shiny chat component
 chat = ui.Chat(
     id="chat",
-    messages=["Hello! Ask me where the superbowl was held in any year?"],
 )
-chat.ui()
+chat.ui(
+    messages=[
+        {
+            "role": "assistant",
+            "content": """
+Hello! Ask me where the Super Bowl was held in any year and I can tell you the state, county, and city.
+For example, you can ask:
+- Where was the Super Bowl in 2020?
+- What city hosted the Super Bowl in 2015?
+- Where was the Super Bowl in 2018?
+""",
+        }
+    ],
+)
 
 
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
     result = await chat_client.run(user_input)
     city_info = result.output
-    message = (
-        f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
-    )
+    message = f"City: {city_info.city}, County: {city_info.county}, State: {city_info.state}"
     await chat.append_message(message)
diff --git a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
index 6808727..efdb350 100644
--- a/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
+++ b/pkg-py/tests/playwright/chat/pydantic-ai/tool_calling/app.py
@@ -42,9 +42,15 @@ def get_joke() -> str:
 # Create and display a Shiny chat component
 chat = ui.Chat(
     id="chat",
-    messages=["Hello! Ask me to tell a joke, get the weather, or tell the time."],
 )
-chat.ui()
+chat.ui(
+    messages=[
+        {
+            "content": "Hello! I'm a chatbot that can help you with weather, time, and jokes. How can I assist you today?",
+            "role": "assistant",
+        }
+    ],
+)
 
 
 # Generate a response when the user submits a message

From d35d3a25e32e4844e70eebf7cd66414d4343a86f Mon Sep 17 00:00:00 2001
From: Karan Gathani
Date: Tue, 15 Jul 2025 10:09:12 +0530
Subject: [PATCH 31/31] Reorder imports in app.py for consistency

Moved the import of Context to group it with related imports, improving
code readability and maintaining a consistent import order.
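The resulting import block, taken from the diff below, keeps the bare
import first and sorts the from-imports alphabetically by module path:

    import pytz
    from dotenv import load_dotenv
    from llama_index.core.agent.workflow import AgentStream, FunctionAgent
    from llama_index.core.tools import FunctionTool
    from llama_index.core.workflow import Context
    from llama_index.llms.openai import OpenAI
    from shiny.express import ui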
--- pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py index 09a764e..f127432 100644 --- a/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py +++ b/pkg-py/tests/playwright/chat/llama-index/tool_calling/app.py @@ -3,8 +3,8 @@ import pytz from dotenv import load_dotenv from llama_index.core.agent.workflow import AgentStream, FunctionAgent -from llama_index.core.workflow import Context from llama_index.core.tools import FunctionTool +from llama_index.core.workflow import Context from llama_index.llms.openai import OpenAI from shiny.express import ui
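Taken together, the llama-index commits in this series converge on a single
pattern for wiring an agent into a Shiny chat app: build a FunctionAgent once,
keep one Context object for conversation state, and forward AgentStream deltas
into the chat component. A consolidated sketch of that final state follows
(the model name and streaming calls come from the patches above; the page
title and greeting text are illustrative):

    from dotenv import load_dotenv
    from llama_index.core.agent.workflow import AgentStream, FunctionAgent
    from llama_index.core.workflow import Context
    from llama_index.llms.openai import OpenAI
    from shiny.express import ui

    _ = load_dotenv()

    llm = OpenAI(model="gpt-4.1-nano-2025-04-14")

    agent = FunctionAgent(
        tools=[],
        llm=llm,
        system_prompt="You are a helpful assistant.",
    )

    # A single Context object carries the conversation state across turns.
    ctx = Context(agent)

    ui.page_opts(title="Hello LlamaIndex Chat", fillable=True)

    chat = ui.Chat(id="chat")
    chat.ui(
        messages=[
            {"role": "assistant", "content": "Hello! How can I help you today?"}
        ],
    )


    @chat.on_user_submit
    async def handle_user_input(user_input: str):
        async def stream_generator():
            # agent.run() returns a handler whose event stream includes
            # incremental AgentStream deltas; yielding them streams tokens
            # straight into the chat UI.
            handler = agent.run(user_msg=user_input, ctx=ctx)
            async for event in handler.stream_events():
                if isinstance(event, AgentStream) and event.delta:
                    yield event.delta
            await handler

        await chat.append_message_stream(stream_generator())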