Skip to content

Commit 20f0f84

Browse files
authored
fix(langchain): make new langchain integration work with just langchain-core (#4783)
- Catch `ImportError` for `langchain.agents` if langchain is not present and set `AgentExecutor` to `None` so the rest of the logic still keeps working
- Add a test for recording new OTEL-compliant data for `langchain-core`
- Verified AI Agents functionality (span generation, token accounting, etc.) with `langchain-core` and `langchain-openai`

Closes TET-1126
Closes PY-1833
Closes #4776
1 parent 0eede69 commit 20f0f84

File tree

2 files changed

+82
-2
lines changed

2 files changed

+82
-2
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929

3030

3131
try:
32-
from langchain.agents import AgentExecutor
3332
from langchain_core.agents import AgentFinish
3433
from langchain_core.callbacks import (
3534
BaseCallbackHandler,
@@ -44,6 +43,11 @@
4443
raise DidNotEnable("langchain not installed")
4544

4645

46+
try:
47+
from langchain.agents import AgentExecutor
48+
except ImportError:
49+
AgentExecutor = None
50+
4751
DATA_FIELDS = {
4852
"frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
4953
"function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,

tests/integrations/langchain/test_langchain.py

Lines changed: 77 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from typing import List, Optional, Any, Iterator
22
from unittest import mock
3-
from unittest.mock import Mock
3+
from unittest.mock import Mock, patch
44

55
import pytest
66

@@ -662,3 +662,79 @@ def test_tools_integration_in_spans(sentry_init, capture_events):
662662

663663
# Ensure we found at least one span with tools data
664664
assert tools_found, "No spans found with tools data"
665+
666+
667+
def test_langchain_integration_with_langchain_core_only(sentry_init, capture_events):
668+
"""Test that the langchain integration works when langchain.agents.AgentExecutor
669+
is not available or langchain is not installed, but langchain-core is.
670+
"""
671+
672+
from langchain_core.outputs import LLMResult, Generation
673+
674+
with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
675+
from sentry_sdk.integrations.langchain import (
676+
LangchainIntegration,
677+
SentryLangchainCallback,
678+
)
679+
680+
sentry_init(
681+
integrations=[LangchainIntegration(include_prompts=True)],
682+
traces_sample_rate=1.0,
683+
send_default_pii=True,
684+
)
685+
events = capture_events()
686+
687+
try:
688+
LangchainIntegration.setup_once()
689+
except Exception as e:
690+
pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")
691+
692+
callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
693+
694+
run_id = "12345678-1234-1234-1234-123456789012"
695+
serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
696+
prompts = ["What is the capital of France?"]
697+
698+
with start_transaction():
699+
callback.on_llm_start(
700+
serialized=serialized,
701+
prompts=prompts,
702+
run_id=run_id,
703+
invocation_params={
704+
"temperature": 0.7,
705+
"max_tokens": 100,
706+
"model": "gpt-3.5-turbo",
707+
},
708+
)
709+
710+
response = LLMResult(
711+
generations=[[Generation(text="The capital of France is Paris.")]],
712+
llm_output={
713+
"token_usage": {
714+
"total_tokens": 25,
715+
"prompt_tokens": 10,
716+
"completion_tokens": 15,
717+
}
718+
},
719+
)
720+
callback.on_llm_end(response=response, run_id=run_id)
721+
722+
assert len(events) > 0
723+
tx = events[0]
724+
assert tx["type"] == "transaction"
725+
726+
llm_spans = [
727+
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
728+
]
729+
assert len(llm_spans) > 0
730+
731+
llm_span = llm_spans[0]
732+
assert llm_span["description"] == "Langchain LLM call"
733+
assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
734+
assert (
735+
llm_span["data"]["gen_ai.response.text"]
736+
== "The capital of France is Paris."
737+
)
738+
assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
739+
assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10
740+
assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15

0 commit comments

Comments (0)