
Commit 6ef19fb

Update Chat templates and examples to use chatlas
1 parent: 2ce08c0

File tree: 17 files changed, +195 −164 lines
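
All of the templates in this commit now share the same shape: the provider-specific SDK client (AnthropicBedrock, AzureOpenAI, AsyncAnthropic, ...) is replaced by a chatlas Chat*() constructor, and the @chat.on_user_submit callback streams chat_model.stream(user_input) straight into the Shiny chat component. A minimal sketch of that shared pattern, assuming an Anthropic API key is available in the environment (the model name and system prompt are illustrative, not taken from any single file below):

from chatlas import ChatAnthropic

from shiny.express import ui

# Construct a chatlas chat client; any provider-specific Chat*() works the same way.
chat_model = ChatAnthropic(
    model="claude-3-5-sonnet-latest",
    system_prompt="You are a helpful assistant.",
)

# Create and display the Shiny chat component with a starter message.
chat = ui.Chat(id="chat", messages=["Hello! How can I help you today?"])
chat.ui()


# Stream the model's reply back into the chat on each user message.
@chat.on_user_submit
async def handle_user_input(user_input):
    response = chat_model.stream(user_input)
    await chat.append_message_stream(response)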

shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py

Lines changed: 5 additions & 13 deletions

@@ -4,16 +4,17 @@
 # To get started, follow the instructions at https://aws.amazon.com/bedrock/claude/
 # as well as https://github.com/anthropics/anthropic-sdk-python#aws-bedrock
 # ------------------------------------------------------------------------------------
-from anthropic import AnthropicBedrock
 from app_utils import load_dotenv
+from chatlas import ChatBedrockAnthropic

 from shiny.express import ui

 # Either explicitly set the AWS environment variables before launching the app, or set
 # them in a file named `.env`. The `python-dotenv` package will load `.env` as
 # environment variables which can be read by `os.getenv()`.
 load_dotenv()
-llm = AnthropicBedrock(
+chat_model = ChatBedrockAnthropic(
+    model="anthropic.claude-3-sonnet-20240229-v1:0",
     # aws_secret_key=os.getenv("AWS_SECRET_KEY"),
     # aws_access_key=os.getenv("AWS_ACCESS_KEY"),
     # aws_region=os.getenv("AWS_REGION"),
@@ -34,15 +35,6 @@

 # Define a callback to run when the user submits a message
 @chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="anthropic")
-    # Create a response message stream
-    response = llm.messages.create(
-        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
-        messages=messages,
-        stream=True,
-        max_tokens=1000,
-    )
-    # Append the response stream into the chat
+async def handle_user_input(user_input):
+    response = chat_model.stream(user_input)
     await chat.append_message_stream(response)
Lines changed: 2 additions & 1 deletion

@@ -1,4 +1,5 @@
 shiny
 python-dotenv
 tokenizers
-anthropic
+chatlas
+anthropic[bedrock]
Lines changed: 12 additions & 29 deletions

@@ -1,58 +1,41 @@
 # ------------------------------------------------------------------------------------
 # A basic Shiny Chat example powered by OpenAI running on Azure.
-# To run it, you'll need OpenAI API key.
-# To get setup, follow the instructions at https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python#create-a-new-python-application
 # ------------------------------------------------------------------------------------
 import os

 from app_utils import load_dotenv
-from openai import AzureOpenAI
+from chatlas import ChatAzureOpenAI

 from shiny.express import ui

-# Either explicitly set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment
-# variables before launching the app, or set them in a file named `.env`. The
-# `python-dotenv` package will load `.env` as environment variables which can later be
-# read by `os.getenv()`.
+# ChatAzureOpenAI() requires an API key from Azure OpenAI.
+# See the docs for more information on how to obtain one.
+# https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
 load_dotenv()
-
-llm = AzureOpenAI(
+chat_model = ChatAzureOpenAI(
     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
-    api_version="2024-02-01",
-    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),  # type: ignore
+    endpoint="https://my-endpoint.openai.azure.com",
+    deployment_id="gpt-4o-mini",
+    api_version="2024-08-01-preview",
 )

-deployment_name = "REPLACE_WITH_YOUR_DEPLOYMENT_NAME"
-
 # Set some Shiny page options
 ui.page_opts(
-    title="Hello OpenAI Chat",
+    title="Hello Azure OpenAI Chat",
     fillable=True,
     fillable_mobile=True,
 )

 # Create a chat instance, with an initial message
 chat = ui.Chat(
     id="chat",
-    messages=[
-        {"content": "Hello! How can I help you today?", "role": "assistant"},
-    ],
+    messages=["Hello! How can I help you today?"],
 )
-
-# Display the chat
 chat.ui()


 # Define a callback to run when the user submits a message
 @chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="openai")
-    # Create a response message stream
-    response = llm.chat.completions.create(
-        model=deployment_name,
-        messages=messages,
-        stream=True,
-    )
-    # Append the response stream into the chat
+async def handle_user_input(user_input):
+    response = chat_model.stream(user_input)
     await chat.append_message_stream(response)
Lines changed: 1 addition & 0 deletions

@@ -1,4 +1,5 @@
 shiny
 python-dotenv
 tokenizers
+chatlas
 openai
Lines changed: 18 additions & 21 deletions

@@ -1,20 +1,23 @@
 # ------------------------------------------------------------------------------------
 # A basic Shiny Chat example powered by Anthropic's Claude model.
-# To run it, you'll need an Anthropic API key.
-# To get one, follow the instructions at https://docs.anthropic.com/en/api/getting-started
 # ------------------------------------------------------------------------------------
 import os

-from anthropic import AsyncAnthropic
 from app_utils import load_dotenv
+from chatlas import ChatAnthropic

 from shiny.express import ui

-# Either explicitly set the ANTHROPIC_API_KEY environment variable before launching the
-# app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
-# as environment variables which can later be read by `os.getenv()`.
+# ChatAnthropic() requires an API key from Anthropic.
+# See the docs for more information on how to obtain one.
+# https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html
 load_dotenv()
-llm = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
+chat_model = ChatAnthropic(
+    api_key=os.environ.get("ANTHROPIC_API_KEY"),
+    model="claude-3-5-sonnet-latest",
+    system_prompt="You are a helpful assistant.",
+)
+

 # Set some Shiny page options
 ui.page_opts(
@@ -23,22 +26,16 @@
     fillable_mobile=True,
 )

-# Create and display empty chat
-chat = ui.Chat(id="chat")
+# Create and display a Shiny chat component
+chat = ui.Chat(
+    id="chat",
+    messages=["Hello! How can I help you today?"],
+)
 chat.ui()


-# Define a callback to run when the user submits a message
+# Generate a response when the user submits a message
 @chat.on_user_submit
-async def _():
-    # Get messages currently in the chat
-    messages = chat.messages(format="anthropic")
-    # Create a response message stream
-    response = await llm.messages.create(
-        model="claude-3-5-sonnet-latest",
-        messages=messages,
-        stream=True,
-        max_tokens=1000,
-    )
-    # Append the response stream into the chat
+async def handle_user_input(user_input):
+    response = chat_model.stream(user_input)
     await chat.append_message_stream(response)
Lines changed: 1 addition & 0 deletions

@@ -1,4 +1,5 @@
 shiny
 python-dotenv
 tokenizers
+chatlas
 anthropic
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+{
+  "type": "app",
+  "id": "chat-ai-gemini",
+  "title": "Chat AI using Google Gemini"
+}
Lines changed: 37 additions & 0 deletions

@@ -0,0 +1,37 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by Google's Gemini model.
+# ------------------------------------------------------------------------------------
+import os
+
+from app_utils import load_dotenv
+from chatlas import ChatGoogle
+
+from shiny.express import ui
+
+# ChatGoogle() requires an API key from Google.
+# See the docs for more information on how to obtain one.
+# https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
+load_dotenv()
+chat_model = ChatGoogle(
+    api_key=os.environ.get("GOOGLE_API_KEY"),
+    system_prompt="You are a helpful assistant.",
+    model="gemini-1.5-flash",
+)
+
+# Set some Shiny page options
+ui.page_opts(
+    title="Hello Google Gemini Chat",
+    fillable=True,
+    fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Generate a response when the user submits a message
+@chat.on_user_submit
+async def handle_user_input(user_input):
+    response = chat_model.stream(user_input)
+    await chat.append_message_stream(response)
Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
+import os
+from pathlib import Path
+from typing import Any
+
+app_dir = Path(__file__).parent
+env_file = app_dir / ".env"
+
+
+def load_dotenv(dotenv_path: os.PathLike[str] = env_file, **kwargs: Any) -> None:
+    """
+    A convenience wrapper around `dotenv.load_dotenv` that warns if `dotenv` is not installed.
+    It also returns `None` to make it easier to ignore the return value.
+    """
+    try:
+        import dotenv
+
+        dotenv.load_dotenv(dotenv_path=dotenv_path, **kwargs)
+    except ImportError:
+        import warnings
+
+        warnings.warn(
+            "Could not import `dotenv`. If you want to use `.env` files to "
+            "load environment variables, please install it using "
+            "`pip install python-dotenv`.",
+            stacklevel=2,
+        )
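
The templates above use this helper to read API keys from a `.env` file placed next to app.py, falling back to a warning if python-dotenv is not installed. A usage sketch under that assumption (the key name here is just an example):

# .env, in the same directory as app.py, might contain e.g.:
#   GOOGLE_API_KEY=...
import os

from app_utils import load_dotenv

load_dotenv()  # populates os.environ from .env when python-dotenv is installed
api_key = os.environ.get("GOOGLE_API_KEY")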
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+shiny
+python-dotenv
+tokenizers
+chatlas
+google-generativeai
