Commit 627ae5a

Merge eb0d15f into 5ec2ac5 (2 parents: 5ec2ac5 + eb0d15f)

File tree: 3 files changed (+3, −137 lines)


pydatalab/pyproject.toml

Lines changed: 0 additions & 2 deletions
```diff
@@ -86,8 +86,6 @@ chat = [
     "langchain >= 0.2.6, < 0.3",
     "langchain-openai ~= 0.1",
     "langchain-anthropic ~= 0.1",
-    "tiktoken ~= 0.7",
-    "transformers ~= 4.42",
 ]
 deploy = ["gunicorn ~= 23.0"]
 all = ["datalab-server[apps,server,chat]"]
```
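The two dropped pins presumably existed only for the client-side token counting removed below: in langchain 0.2.x, `get_num_tokens_from_messages` tokenizes locally, using tiktoken for OpenAI models and a GPT-2 tokenizer loaded through transformers as the generic fallback, so once counts come from the API response instead, neither package is needed. A minimal sketch of the old, locally tokenized path these pins supported (the model name is a placeholder):

```python
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

client = ChatOpenAI(model="gpt-4o")  # placeholder model name

# get_num_tokens_from_messages counts tokens locally: tiktoken for OpenAI
# models, a transformers-loaded GPT-2 tokenizer as the generic fallback.
count = client.get_num_tokens_from_messages([HumanMessage(content="Hello!")])
print(count)
```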

pydatalab/src/pydatalab/apps/chat/blocks.py

Lines changed: 3 additions & 18 deletions
```diff
@@ -3,6 +3,7 @@
 
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pydatalab.blocks.base import DataBlock
```
```diff
@@ -165,9 +166,7 @@ def render(self):
 
         try:
             model_name = self.data["model"]
-
-            model_dict = self.data["available_models"][model_name]
-            LOGGER.warning(f"Initializing chatblock with model: {model_name}")
+            LOGGER.debug(f"Initializing chatblock with model: {model_name}")
 
             if model_name.startswith("claude"):
                 self.chat_client = ChatAnthropic(
```
```diff
@@ -183,8 +182,6 @@ def render(self):
             LOGGER.debug(
                 f'submitting request to API for completion with last message role "{self.data["messages"][-1]["role"]}" (message = {self.data["messages"][-1:]}). Temperature = {self.data["temperature"]} (type {type(self.data["temperature"])})'
             )
-            from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
-
             # Convert your messages to the required format
             langchain_messages = []
             for message in self.data["messages"]:
```
```diff
@@ -195,24 +192,12 @@ def render(self):
                 else:
                     langchain_messages.append(AIMessage(content=message["content"]))
 
-            token_count = self.chat_client.get_num_tokens_from_messages(langchain_messages)
-
-            self.data["token_count"] = token_count
-
-            if token_count >= model_dict["context_window"]:
-                self.data["error_message"] = (
-                    f"""This conversation has reached its maximum context size and the chatbot won't be able to respond further ({token_count} tokens, max: {model_dict["context_window"]}). Please make a new chat block to start fresh, or use a model with a larger context window"""
-                )
-                return
-
             # Call the chat client with the invoke method
             response = self.chat_client.invoke(langchain_messages)
+            self.data["token_count"] = response.usage_metadata["total_tokens"]
 
             langchain_messages.append(response)
 
-            token_count = self.chat_client.get_num_tokens_from_messages(langchain_messages)
-
-            self.data["token_count"] = token_count
             self.data["messages"].append({"role": "assistant", "content": response.content})
             self.data["error_message"] = None
 
```
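Taken together, the `render()` changes swap local token counting for the provider's own accounting: `invoke` returns an `AIMessage` whose `usage_metadata` carries input, output, and total token counts as reported by the API, so the separate before-and-after calls to `get_num_tokens_from_messages` are gone, along with the context-window guard, which needed a count before the request. A minimal sketch of the new path, assuming the provider populates `usage_metadata` (model name and settings are placeholders):

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

client = ChatOpenAI(model="gpt-4o", temperature=0.2)  # placeholder settings

messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Summarise this experiment in one line."),
]

response = client.invoke(messages)

# usage_metadata is a UsageMetadata dict with input_tokens, output_tokens,
# and total_tokens, reported by the API itself; it can be None if the
# provider returns no usage data, so real code may want this guard.
if response.usage_metadata is not None:
    print(response.usage_metadata["total_tokens"])
```

One trade-off implied by the diff: because the count now arrives with the response, the block can no longer refuse a request up front when the conversation has already exceeded the model's context window.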
