|
1 | 1 | from typing import List, Optional, Any, Iterator
|
2 | 2 | from unittest import mock
|
3 |
| -from unittest.mock import Mock |
| 3 | +from unittest.mock import Mock, patch |
4 | 4 |
|
5 | 5 | import pytest
|
6 | 6 |
|
@@ -662,3 +662,79 @@ def test_tools_integration_in_spans(sentry_init, capture_events):
|
662 | 662 |
|
663 | 663 | # Ensure we found at least one span with tools data
|
664 | 664 | assert tools_found, "No spans found with tools data"
|
| 665 | + |
| 666 | + |
def test_langchain_integration_with_langchain_core_only(sentry_init, capture_events):
    """Verify the integration still works when ``langchain.agents.AgentExecutor``
    is unavailable (e.g. only langchain-core is installed, not langchain itself).
    """

    from langchain_core.outputs import Generation, LLMResult

    # Simulate the "langchain not installed" case by forcing the integration's
    # AgentExecutor reference to None for the duration of the test.
    with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
        from sentry_sdk.integrations.langchain import (
            LangchainIntegration,
            SentryLangchainCallback,
        )

        sentry_init(
            integrations=[LangchainIntegration(include_prompts=True)],
            traces_sample_rate=1.0,
            send_default_pii=True,
        )
        events = capture_events()

        # setup_once() must not blow up just because AgentExecutor is None.
        try:
            LangchainIntegration.setup_once()
        except Exception as e:
            pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")

        sentry_callback = SentryLangchainCallback(
            max_span_map_size=100, include_prompts=True
        )

        llm_run_id = "12345678-1234-1234-1234-123456789012"
        serialized_llm = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
        prompt_list = ["What is the capital of France?"]

        # Drive the callback by hand: start an LLM call, then complete it.
        with start_transaction():
            sentry_callback.on_llm_start(
                serialized=serialized_llm,
                prompts=prompt_list,
                run_id=llm_run_id,
                invocation_params={
                    "temperature": 0.7,
                    "max_tokens": 100,
                    "model": "gpt-3.5-turbo",
                },
            )

            llm_result = LLMResult(
                generations=[[Generation(text="The capital of France is Paris.")]],
                llm_output={
                    "token_usage": {
                        "total_tokens": 25,
                        "prompt_tokens": 10,
                        "completion_tokens": 15,
                    }
                },
            )
            sentry_callback.on_llm_end(response=llm_result, run_id=llm_run_id)

        # Exactly the data fed in above must surface on a gen_ai.pipeline span
        # of the captured transaction.
        assert len(events) > 0
        transaction_event = events[0]
        assert transaction_event["type"] == "transaction"

        pipeline_spans = [
            s
            for s in transaction_event.get("spans", [])
            if s.get("op") == "gen_ai.pipeline"
        ]
        assert len(pipeline_spans) > 0

        pipeline_span = pipeline_spans[0]
        span_data = pipeline_span["data"]
        assert pipeline_span["description"] == "Langchain LLM call"
        assert span_data["gen_ai.request.model"] == "gpt-3.5-turbo"
        assert span_data["gen_ai.response.text"] == "The capital of France is Paris."
        assert span_data["gen_ai.usage.total_tokens"] == 25
        assert span_data["gen_ai.usage.input_tokens"] == 10
        assert span_data["gen_ai.usage.output_tokens"] == 15
0 commit comments