diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 50ab1d39ce..43f9d296a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,34 +27,12 @@ jobs: - uses: actions/checkout@v5.0.0 - uses: actions/setup-python@v6 with: - python-version: 3.12 + python-version: 3.14 - run: | pip install tox tox -e linters - check-ci-config: - name: Check CI config - runs-on: ubuntu-latest - timeout-minutes: 10 - - steps: - - uses: actions/checkout@v5.0.0 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - - uses: actions/setup-python@v6 - with: - python-version: 3.12 - - - name: Detect unexpected changes to tox.ini or CI - run: | - pip install -e . - pip install -r scripts/populate_tox/requirements.txt - python scripts/populate_tox/populate_tox.py --fail-on-changes - pip install -r scripts/split_tox_gh_actions/requirements.txt - python scripts/split_tox_gh_actions/split_tox_gh_actions.py --fail-on-changes - build_lambda_layer: name: Build Package runs-on: ubuntu-latest diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 74664add46..de0b8217da 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -63,7 +63,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions @@ -77,4 +77,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml index cf21720ff1..1b9a341f17 100644 --- a/.github/workflows/test-integrations-ai.yml +++ b/.github/workflows/test-integrations-ai.yml @@ -58,6 +58,14 @@ jobs: run: | set -x # print commands that are executed ./scripts/runtox.sh "py${{ matrix.python-version }}-cohere" + - name: Test google_genai + run: | + set -x # print commands that are executed + ./scripts/runtox.sh "py${{ matrix.python-version }}-google_genai" + - name: Test huggingface_hub + run: | + set -x # print commands that are executed + ./scripts/runtox.sh "py${{ matrix.python-version }}-huggingface_hub" - name: Test langchain-base run: | set -x # print commands that are executed @@ -66,6 +74,14 @@ jobs: run: | set -x # print commands that are executed ./scripts/runtox.sh "py${{ matrix.python-version }}-langchain-notiktoken" + - name: Test langgraph + run: | + set -x # print commands that are executed + ./scripts/runtox.sh "py${{ matrix.python-version }}-langgraph" + - name: Test litellm + run: | + set -x # print commands that are executed + ./scripts/runtox.sh "py${{ matrix.python-version }}-litellm" - name: Test openai-base run: | set -x # print commands that are executed @@ -74,18 +90,10 @@ jobs: run: | set -x # print commands that are executed ./scripts/runtox.sh "py${{ matrix.python-version }}-openai-notiktoken" - - name: Test langgraph - run: | - set -x # print commands that are executed - ./scripts/runtox.sh "py${{ matrix.python-version }}-langgraph" - name: Test openai_agents run: | set -x # print commands that are executed ./scripts/runtox.sh "py${{ matrix.python-version }}-openai_agents" - - name: Test huggingface_hub - run: | - set -x # print commands that are executed - ./scripts/runtox.sh "py${{ matrix.python-version }}-huggingface_hub" - name: Generate coverage XML (Python 3.6) if: ${{ !cancelled() && matrix.python-version == '3.6' }} run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 941aec99e6..4d1119ddde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,88 @@ # Changelog +## 2.42.0 + +### Various fixes & improvements + +- feat: Add source information for slow outgoing HTTP requests (#4902) by @alexander-alderman-webb +- tests: Update tox (#4913) by @sentrivana +- fix(Ray): Retain the original function name when patching Ray tasks (#4858) by @svartalf +- feat(ai): Add `python-genai` integration (#4891) by @vgrozdanic + Enable the new Google GenAI integration with the code snippet below, and you can use the Sentry AI dashboards to observe your AI calls: + + ```python + import sentry_sdk + from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration + sentry_sdk.init( + dsn="", + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for tracing. + traces_sample_rate=1.0, + # Add data like inputs and responses; + # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info + send_default_pii=True, + integrations=[ + GoogleGenAIIntegration(), + ], + ) + ``` + +## 2.41.0 + +### Various fixes & improvements + +- feat: Add `concurrent.futures` patch to threading integration (#4770) by @alexander-alderman-webb + + The SDK now makes sure to automatically preserve span relationships when using `ThreadPoolExecutor`. 
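  Below is a minimal sketch of what this means in practice (the transaction and span names are illustrative, and the snippet assumes a DSN is configured via the environment; it is not taken from the changelog itself):

  ```python
  import sentry_sdk
  from concurrent.futures import ThreadPoolExecutor

  sentry_sdk.init(traces_sample_rate=1.0)  # DSN picked up from SENTRY_DSN

  def work(n):
      # Even though this runs on a worker thread, the span is attached
      # to the "batch" transaction that was active when the work was submitted.
      with sentry_sdk.start_span(op="task", description=f"work {n}"):
          return n * n

  with sentry_sdk.start_transaction(name="batch"):
      with ThreadPoolExecutor(max_workers=4) as executor:
          results = list(executor.map(work, range(4)))
  ```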
+- chore: Remove old metrics code (#4899) by @sentrivana + + Removed all code related to the deprecated experimental metrics feature (`sentry_sdk.metrics`). +- ref: Remove "experimental" from log function name (#4901) by @sentrivana +- fix(ai): Add mapping for gen_ai message roles (#4884) by @shellmayr +- feat(metrics): Add trace metrics behind an experiments flag (#4898) by @k-fish + +## 2.40.0 + +### Various fixes & improvements + +- Add LiteLLM integration (#4864) by @constantinius + Once you've enabled the [new LiteLLM integration](https://docs.sentry.io/platforms/python/integrations/litellm/), you can use the Sentry AI Agents Monitoring, a Sentry dashboard that helps you understand what's going on with your AI requests: + + ```python + import sentry_sdk + from sentry_sdk.integrations.litellm import LiteLLMIntegration + sentry_sdk.init( + dsn="", + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for tracing. + traces_sample_rate=1.0, + # Add data like inputs and responses; + # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info + send_default_pii=True, + integrations=[ + LiteLLMIntegration(), + ], + ) + ``` + +- Litestar: Copy request info to prevent cookies mutation (#4883) by @alexander-alderman-webb +- Add tracing to `DramatiqIntegration` (#4571) by @Igreh +- Also emit spans for MCP tool calls done by the LLM (#4875) by @constantinius +- Option to not trace HTTP requests based on status codes (#4869) by @alexander-alderman-webb + You can now disable transactions for incoming requests with specific HTTP status codes. The [new `trace_ignore_status_codes` option](https://docs.sentry.io/platforms/python/configuration/options/#trace_ignore_status_codes) accepts a `set` of status codes as integers. If a transaction wraps a request that results in one of the provided status codes, the transaction will be unsampled. + + ```python + import sentry_sdk + + sentry_sdk.init( + trace_ignore_status_codes={301, 302, 303, *range(305, 400), 404}, + ) + ``` + +- Move `_set_agent_data` call to `ai_client_span` function (#4876) by @constantinius +- Add script to determine lowest supported versions (#4867) by @sentrivana +- Update `CONTRIBUTING.md` (#4870) by @sentrivana + ## 2.39.0 ### Various fixes & improvements diff --git a/docs/conf.py b/docs/conf.py index 292e0971e2..2d54f45170 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -31,7 +31,7 @@ copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year) author = "Sentry Team and Contributors" -release = "2.39.0" +release = "2.42.0" version = ".".join(release.split(".")[:2]) # The short X.Y version. diff --git a/pyproject.toml b/pyproject.toml index 8e6fe345f4..4441660c50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,6 +118,10 @@ ignore_missing_imports = true module = "langgraph.*" ignore_missing_imports = true +[[tool.mypy.overrides]] +module = "google.genai.*" +ignore_missing_imports = true + [[tool.mypy.overrides]] module = "executing.*" ignore_missing_imports = true @@ -179,6 +183,10 @@ ignore_missing_imports = true module = "agents.*" ignore_missing_imports = true +[[tool.mypy.overrides]] +module = "dramatiq.*" +ignore_missing_imports = true + # # Tool: Ruff (linting and formatting) # diff --git a/scripts/populate_tox/README.md b/scripts/populate_tox/README.md index 9bdb3567b8..d6c4e52147 100644 --- a/scripts/populate_tox/README.md +++ b/scripts/populate_tox/README.md @@ -14,7 +14,7 @@ combination of hardcoded and generated entries. 
The `populate_tox.py` script fills out the auto-generated part of that template. It does this by querying PyPI for each framework's package and its metadata and -then determining which versions make sense to test to get good coverage. +then determining which versions it makes sense to test to get good coverage. By default, the lowest supported and latest version of a framework are always tested, with a number of releases in between: @@ -22,17 +22,16 @@ tested, with a number of releases in between: - If the package doesn't have multiple majors, we pick two versions in between lowest and highest. -#### Caveats +Each test suite requires at least some configuration to be added to +`TEST_SUITE_CONFIG` in `scripts/populate_tox/config.py`. If you're adding a new +integration, check out the [Add a new test suite](#add-a-new-test-suite) section. -- Make sure the integration name is the same everywhere. If it consists of - multiple words, use an underscore instead of a hyphen. +## Test suite config -## Defining constraints - -The `TEST_SUITE_CONFIG` dictionary defines, for each integration test suite, -the main package (framework, library) to test with; any additional test -dependencies, optionally gated behind specific conditions; and optionally -the Python versions to test on. +The `TEST_SUITE_CONFIG` dictionary in `scripts/populate_tox/config.py` defines, +for each integration test suite, the main package (framework, library) to test +with; any additional test dependencies, optionally gated behind specific +conditions; and optionally the Python versions to test on. Constraints are defined using the format specified below. The following sections describe each key. @@ -58,7 +57,7 @@ in [packaging.specifiers](https://packaging.pypa.io/en/stable/specifiers.html). ### `package` -The name of the third party package as it's listed on PyPI. The script will +The name of the third-party package as it's listed on PyPI. The script will be picking different versions of this package to test. This key is mandatory. @@ -69,7 +68,7 @@ The test dependencies of the test suite. They're defined as a dictionary of `rule: [package1, package2, ...]` key-value pairs. All packages in the package list of a rule will be installed as long as the rule applies. -`rule`s are predefined. Each `rule` must be one of the following: +Each `rule` must be one of the following: - `*`: packages will be always installed - a version specifier on the main package (e.g. `<=0.32`): packages will only be installed if the main package falls into the version bounds specified @@ -77,7 +76,7 @@ in the package list of a rule will be installed as long as the rule applies. installed if the Python version matches one from the list Rules can be used to specify version bounds on older versions of the main -package's dependencies, for example. If e.g. Flask tests generally need +package's dependencies, for example. If Flask tests generally need Werkzeug and don't care about its version, but Flask older than 3.0 needs a specific Werkzeug version to work, you can say: @@ -176,7 +175,7 @@ be expressed like so: ### `integration_name` Sometimes, the name of the test suite doesn't match the name of the integration. -For example, we have the `openai_base` and `openai_notiktoken` test suites, both +For example, we have the `openai-base` and `openai-notiktoken` test suites, both of which are actually testing the `openai` integration. If this is the case, you can use the `integration_name` key to define the name of the integration. 
If not provided, it will default to the name of the test suite. @@ -193,6 +192,11 @@ greater than 2, as the oldest and latest supported versions will always be picked. Additionally, if there is a recent prerelease, it'll also always be picked (this doesn't count towards `num_versions`). +For instance, `num_versions` set to `2` will test only the lowest supported and +the latest release of the package. `num_versions` equal to `3` will test the lowest +supported release, the latest release, and one release in between; `num_versions` set to `4` +will test an additional release in between. In all these cases, if there is +a recent prerelease, it will be picked in addition to the versions above. ## How-Tos @@ -202,9 +206,10 @@ picked (this doesn't count towards `num_versions`). in `integrations/__init__.py`. This should be the lowest version of the framework that we can guarantee works with the SDK. If you've just added the integration, you should generally set this to the latest version of the framework - at the time. + at the time, unless you've verified the integration works for earlier versions + as well. 2. Add the integration and any constraints to `TEST_SUITE_CONFIG`. See the - "Defining constraints" section for the format. + [Test suite config](#test-suite-config) section for the format. 3. Add the integration to one of the groups in the `GROUPS` dictionary in `scripts/split_tox_gh_actions/split_tox_gh_actions.py`. 4. Run `scripts/generate-test-files.sh` and commit the changes. diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py index 34ae680fad..1f23b3fb08 100644 --- a/scripts/populate_tox/config.py +++ b/scripts/populate_tox/config.py @@ -1,7 +1,6 @@ # The TEST_SUITE_CONFIG dictionary defines, for each integration test suite, -# the main package (framework, library) to test with; any additional test -# dependencies, optionally gated behind specific conditions; and optionally -# the Python versions to test on. +# at least the main package (framework, library) to test with. Additional +# test dependencies, Python versions to test on, etc. can also be defined here. # # See scripts/populate_tox/README.md for more info on the format and examples.
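For illustration, a hypothetical `TEST_SUITE_CONFIG` entry combining the keys described in the README above might look like the following (the `myframework` suite, its version bounds, and the `legacy-helper` package are invented for this sketch, not taken from the actual config):

```python
"myframework-base": {
    # PyPI name of the package under test (the only mandatory key).
    "package": "myframework",
    "deps": {
        # Rule "*": always installed.
        "*": ["pytest-asyncio"],
        # Version-specifier rule: only for old myframework releases.
        "<2.0": ["legacy-helper<1.5"],
        # Python-version rule: only on these interpreters.
        "py3.8,py3.9": ["typing-extensions"],
    },
    # Only test on Python 3.8 and newer.
    "python": ">=3.8",
    # Lowest supported release, latest release, and one in between.
    "num_versions": 3,
    # The suite name has a suffix, but it tests the myframework integration.
    "integration_name": "myframework",
},
```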
@@ -143,6 +142,13 @@ "package": "gql[all]", "num_versions": 2, }, + "google_genai": { + "package": "google-genai", + "deps": { + "*": ["pytest-asyncio"], + }, + "python": ">=3.9", + }, "graphene": { "package": "graphene", "deps": { @@ -183,9 +189,8 @@ "huggingface_hub": { "package": "huggingface_hub", "deps": { - "*": ["responses"], + "*": ["responses", "pytest-httpx"], }, - "include": "<1.0", }, "langchain-base": { "package": "langchain", @@ -214,6 +219,9 @@ "package": "launchdarkly-server-sdk", "num_versions": 2, }, + "litellm": { + "package": "litellm", + }, "litestar": { "package": "litestar", "deps": { @@ -232,7 +240,10 @@ "*": ["pytest-asyncio", "tiktoken"], "<1.55": ["httpx<0.28"], }, - "python": ">=3.8", + "python": { + ">0.0,<2.3": ">=3.8", + ">=2.3": ">=3.9", + }, }, "openai-notiktoken": { "package": "openai", @@ -241,7 +252,10 @@ "*": ["pytest-asyncio"], "<1.55": ["httpx<0.28"], }, - "python": ">=3.8", + "python": { + ">0.0,<2.3": ">=3.8", + ">=2.3": ">=3.9", + }, }, "openai_agents": { "package": "openai-agents", diff --git a/scripts/populate_tox/populate_tox.py b/scripts/populate_tox/populate_tox.py index c0bf7f1a9f..453823f39d 100644 --- a/scripts/populate_tox/populate_tox.py +++ b/scripts/populate_tox/populate_tox.py @@ -130,7 +130,8 @@ def _save_to_cache(package: str, version: Version, release: Optional[dict]) -> N def _prefilter_releases( - integration: str, releases: dict[str, dict], older_than: Optional[datetime] = None + integration: str, + releases: dict[str, dict], ) -> tuple[list[Version], Optional[Version]]: """ Filter `releases`, removing releases that are for sure unsupported. @@ -178,9 +179,6 @@ def _prefilter_releases( uploaded = datetime.fromisoformat(meta["upload_time_iso_8601"]) - if older_than is not None and uploaded > older_than: - continue - if CUTOFF is not None and uploaded < CUTOFF: continue @@ -224,7 +222,7 @@ def _prefilter_releases( def get_supported_releases( - integration: str, pypi_data: dict, older_than: Optional[datetime] = None + integration: str, pypi_data: dict ) -> tuple[list[Version], Optional[Version]]: """ Get a list of releases that are currently supported by the SDK. @@ -236,9 +234,6 @@ def get_supported_releases( We return the list of supported releases and optionally also the newest prerelease, if it should be tested (meaning it's for a version higher than the current stable version). - - If an `older_than` timestamp is provided, no release newer than that will be - considered. """ package = pypi_data["info"]["name"] @@ -246,7 +241,8 @@ def get_supported_releases( # (because that might require an additional API call for some # of the releases) releases, latest_prerelease = _prefilter_releases( - integration, pypi_data["releases"], older_than + integration, + pypi_data["releases"], ) def _supports_lowest(release: Version) -> bool: @@ -665,32 +661,10 @@ def _normalize_release(release: dict) -> dict: return normalized -def main(fail_on_changes: bool = False) -> dict[str, list]: +def main() -> dict[str, list]: """ Generate tox.ini from the tox.jinja template. - - The script has two modes of operation: - - fail on changes mode (if `fail_on_changes` is True) - - normal mode (if `fail_on_changes` is False) - - Fail on changes mode is run on every PR to make sure that `tox.ini`, - `tox.jinja` and this script don't go out of sync because of manual changes - in one place but not the other. - - Normal mode is meant to be run as a cron job, regenerating tox.ini and - proposing the changes via a PR. 
""" - print(f"Running in {'fail_on_changes' if fail_on_changes else 'normal'} mode.") - last_updated = get_last_updated() - if fail_on_changes: - # We need to make the script ignore any new releases after the last updated - # timestamp so that we don't fail CI on a PR just because a new package - # version was released, leading to unrelated changes in tox.ini. - print( - f"Since we're in fail_on_changes mode, we're only considering " - f"releases before the last tox.ini update at {last_updated.isoformat()}." - ) - global MIN_PYTHON_VERSION, MAX_PYTHON_VERSION meta = _fetch_sdk_metadata() sdk_python_versions = _parse_python_versions_from_classifiers( @@ -736,12 +710,7 @@ def main(fail_on_changes: bool = False) -> dict[str, list]: # Get the list of all supported releases - # If in fail-on-changes mode, ignore releases newer than `last_updated` - older_than = last_updated if fail_on_changes else None - - releases, latest_prerelease = get_supported_releases( - integration, pypi_data, older_than - ) + releases, latest_prerelease = get_supported_releases(integration, pypi_data) if not releases: print(" Found no supported releases.") @@ -778,9 +747,6 @@ def main(fail_on_changes: bool = False) -> dict[str, list]: } ) - if fail_on_changes: - old_file_hash = get_file_hash() - write_tox_file(packages) # Sort the release cache file @@ -798,36 +764,13 @@ def main(fail_on_changes: bool = False) -> dict[str, list]: ): releases_cache.write(json.dumps(release) + "\n") - if fail_on_changes: - new_file_hash = get_file_hash() - if old_file_hash != new_file_hash: - raise RuntimeError( - dedent( - """ - Detected that `tox.ini` is out of sync with - `scripts/populate_tox/tox.jinja` and/or - `scripts/populate_tox/populate_tox.py`. This might either mean - that `tox.ini` was changed manually, or the `tox.jinja` - template and/or the `populate_tox.py` script were changed without - regenerating `tox.ini`. - - Please don't make manual changes to `tox.ini`. Instead, make the - changes to the `tox.jinja` template and/or the `populate_tox.py` - script (as applicable) and regenerate the `tox.ini` file by - running scripts/generate-test-files.sh - """ - ) - ) - print("Done checking tox.ini. Looking good!") - else: - print( - "Done generating tox.ini. Make sure to also update the CI YAML " - "files to reflect the new test targets." - ) + print( + "Done generating tox.ini. Make sure to also update the CI YAML " + "files to reflect the new test targets." 
+ ) return packages if __name__ == "__main__": - fail_on_changes = len(sys.argv) == 2 and sys.argv[1] == "--fail-on-changes" - main(fail_on_changes) + main() diff --git a/scripts/populate_tox/releases.jsonl b/scripts/populate_tox/releases.jsonl index 3532b61c75..2ff66f2b18 100644 --- a/scripts/populate_tox/releases.jsonl +++ b/scripts/populate_tox/releases.jsonl @@ -5,8 +5,8 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.5", "version": "2.2.28", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.6", "version": "3.1.14", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.6", "version": "3.2.25", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", 
"Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.8", "version": "4.2.24", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.10", "version": "5.2.6", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.8", "version": "4.2.25", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.10", "version": "5.2.7", "yanked": false}} {"info": {"classifiers": ["Development Status :: 3 - Alpha", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.12", 
"Programming Language :: Python :: 3.13", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Django", "requires_python": ">=3.12", "version": "6.0a1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Flask", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "Flask", "requires_python": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", "version": "1.1.4", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Flask", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "Flask", "requires_python": ">=3.8", "version": "2.3.3", "yanked": false}} @@ -16,17 +16,17 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "name": "SQLAlchemy", "requires_python": "", "version": "1.2.19", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "name": "SQLAlchemy", "requires_python": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", "version": "1.3.24", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI 
Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "name": "SQLAlchemy", "requires_python": "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7", "version": "1.4.54", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "name": "SQLAlchemy", "requires_python": ">=3.7", "version": "2.0.43", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "name": "SQLAlchemy", "requires_python": ">=3.7", "version": "2.0.44", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Typing :: Typed"], "name": "UnleashClient", "requires_python": ">=3.8", "version": "6.0.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Typing :: Typed"], "name": "UnleashClient", "requires_python": ">=3.8", "version": "6.3.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: 
MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "aiohttp", "requires_python": ">=3.8", "version": "3.10.11", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "aiohttp", "requires_python": ">=3.9", "version": "3.12.15", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "aiohttp", "requires_python": ">=3.9", "version": "3.13.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Internet :: WWW/HTTP"], "name": "aiohttp", "requires_python": ">=3.5.3", "version": "3.4.4", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "aiohttp", "requires_python": ">=3.6", "version": "3.7.4", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", 
"Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.7", "version": "0.16.0", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.7", "version": "0.33.1", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.8", "version": "0.50.0", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.8", "version": "0.68.1", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.7", "version": "0.34.2", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 
3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.8", "version": "0.52.2", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "anthropic", "requires_python": ">=3.8", "version": "0.69.0", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: End Users/Desktop", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "apache-beam", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*", "version": "2.12.0", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: End Users/Desktop", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "apache-beam", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*", "version": "2.13.0", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: End Users/Desktop", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "apache-beam", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*", "version": "2.14.0", "yanked": false}} @@ -46,7 +46,7 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7"], "name": "boto3", "requires_python": "", "version": "1.12.49", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">= 3.6", "version": "1.20.54", "yanked": false}} {"info": 
{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">= 3.7", "version": "1.28.85", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">=3.9", "version": "1.40.40", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">=3.9", "version": "1.40.50", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "bottle", "requires_python": "", "version": "0.12.25", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries", "Topic 
:: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "bottle", "requires_python": null, "version": "0.13.4", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Object Brokering", "Topic :: System :: Distributed Computing"], "name": "celery", "requires_python": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", "version": "4.4.7", "yanked": false}} @@ -66,9 +66,13 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "falcon", "requires_python": ">=3.5", "version": "3.1.3", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Typing :: Typed"], "name": "falcon", "requires_python": ">=3.8", "version": "4.1.0", "yanked": false}} {"info": {"classifiers": ["Development Status 
:: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": "0.105.0", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Framework :: Pydantic :: 2", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": "0.118.0", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Framework :: Pydantic :: 2", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": 
"0.119.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.6.1", "version": "0.79.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.7", "version": "0.92.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.29.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.34.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache 
Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.39.1", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.43.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": "", "version": "3.4.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": ">=3.8.1", "version": "4.0.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": ">=3.8.1", "version": "4.2.0b0", "yanked": false}} @@ -78,6 +82,7 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "grpcio", "requires_python": ">=3.6", "version": "1.47.5", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: 
Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "grpcio", "requires_python": ">=3.7", "version": "1.62.3", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "grpcio", "requires_python": ">=3.9", "version": "1.75.1", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "grpcio", "requires_python": ">=3.9", "version": "1.76.0rc1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: Trio", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "httpx", "requires_python": ">=3.6", "version": "0.16.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: Trio", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "httpx", "requires_python": ">=3.6", "version": "0.20.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: Trio", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP"], "name": "httpx", "requires_python": ">=3.6", "version": "0.22.0", "yanked": false}} @@ -93,27 +98,34 @@ {"info": {"classifiers": ["Intended Audience :: Developers", "Intended 
Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.24.7", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.28.1", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.32.6", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.35.1", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.35.3", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: 
Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.9.0", "version": "1.0.0rc5", "yanked": false}} {"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.9"], "name": "langchain", "requires_python": "<4.0,>=3.8.1", "version": "0.1.20", "yanked": false}} {"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.9"], "name": "langchain", "requires_python": "<4.0,>=3.8.1", "version": "0.2.17", "yanked": false}} {"info": {"classifiers": [], "name": "langchain", "requires_python": "<4.0,>=3.9", "version": "0.3.27", "yanked": false}} -{"info": {"classifiers": [], "name": "langgraph", "requires_python": ">=3.9", "version": "0.6.7", "yanked": false}} -{"info": {"classifiers": [], "name": "langgraph", "requires_python": ">=3.10", "version": "1.0.0a3", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries"], "name": "launchdarkly-server-sdk", "requires_python": ">=3.9", "version": "9.12.0", "yanked": false}} +{"info": {"classifiers": [], "name": "langgraph", "requires_python": ">=3.9", "version": "0.6.10", "yanked": false}} +{"info": {"classifiers": [], "name": "langgraph", "requires_python": ">=3.10", "version": "1.0.0a4", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries"], "name": "launchdarkly-server-sdk", "requires_python": ">=3.9", "version": "9.12.1", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", 
"Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries"], "name": "launchdarkly-server-sdk", "requires_python": ">=3.8", "version": "9.8.1", "yanked": false}} +{"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9"], "name": "litellm", "requires_python": "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8", "version": "1.77.7", "yanked": false}} +{"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9"], "name": "litellm", "requires_python": "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8", "version": "1.78.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "litestar", "requires_python": ">=3.8,<4.0", "version": "2.0.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "litestar", "requires_python": "<4.0,>=3.8", "version": "2.12.1", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT 
License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "litestar", "requires_python": "<4.0,>=3.8", "version": "2.17.0", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "litestar", "requires_python": "<4.0,>=3.8", "version": "2.18.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "litestar", "requires_python": "<4.0,>=3.8", "version": "2.6.4", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming 
Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: System :: Logging"], "name": "loguru", "requires_python": "<4.0,>=3.5", "version": "0.7.3", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.7.1", "version": "1.0.1", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.100.2", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.107.3", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.109.1", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language 
:: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.7.1", "version": "1.37.2", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.73.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.57.4", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "1.86.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "2.1.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python 
Modules", "Typing :: Typed"], "name": "openai", "requires_python": ">=3.8", "version": "2.3.0", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai-agents", "requires_python": ">=3.9", "version": "0.0.19", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai-agents", "requires_python": ">=3.9", "version": "0.1.0", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai-agents", "requires_python": ">=3.9", "version": "0.2.11", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai-agents", "requires_python": ">=3.9", "version": "0.3.2", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "openai-agents", "requires_python": ">=3.9", "version": "0.3.3", "yanked": false}} {"info": {"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "name": "openfeature-sdk", "requires_python": ">=3.8", "version": "0.7.5", "yanked": false}} {"info": {"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "name": "openfeature-sdk", "requires_python": ">=3.9", "version": "0.8.3", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language 
:: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8"], "name": "pure-eval", "requires_python": "", "version": "0.0.3", "yanked": false}} @@ -126,7 +138,7 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": "", "version": "3.5.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": "", "version": "3.6.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": ">=3.6", "version": "4.0.2", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", "Typing :: Typed"], "name": "pymongo", "requires_python": ">=3.9", "version": "4.15.1", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended 
Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", "Typing :: Typed"], "name": "pymongo", "requires_python": ">=3.9", "version": "4.15.3", "yanked": false}} {"info": {"classifiers": ["Framework :: Pylons", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": null, "version": "1.0.2", "yanked": false}} {"info": {"classifiers": ["Development Status :: 6 - Mature", "Framework :: Pyramid", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", "version": "1.10.8", "yanked": false}} {"info": {"classifiers": ["Development Status :: 6 - Mature", "Framework :: Pyramid", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": "", "version": "1.6.5", "yanked": false}} @@ -141,7 +153,7 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Typing :: Typed"], "name": "pyspark", "requires_python": ">=3.6", "version": "3.1.3", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: 
Python :: Implementation :: PyPy", "Typing :: Typed"], "name": "pyspark", "requires_python": ">=3.8", "version": "3.5.7", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Typing :: Typed"], "name": "pyspark", "requires_python": ">=3.9", "version": "4.0.1", "yanked": false}} -{"info": {"classifiers": ["Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9"], "name": "ray", "requires_python": ">=3.9", "version": "2.49.2", "yanked": false}} +{"info": {"classifiers": ["Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9"], "name": "ray", "requires_python": ">=3.9", "version": "2.50.0", "yanked": false}} {"info": {"classifiers": ["Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "ray", "requires_python": "", "version": "2.7.2", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python"], "name": "redis", "requires_python": null, "version": "0.6.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "name": "redis", "requires_python": "", "version": "2.10.6", "yanked": false}} @@ -153,7 +165,7 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.7", "version": "4.6.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS 
Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.8", "version": "5.3.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "6.4.0", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "7.0.0b2", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "7.0.0b3", "yanked": false}} {"info": {"classifiers": ["Development Status :: 3 - Alpha", "Environment :: Web Environment", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "name": "redis-py-cluster", "requires_python": null, "version": "0.1.0", "yanked": false}} {"info": {"classifiers": [], "name": "redis-py-cluster", "requires_python": null, "version": "1.1.0", "yanked": false}} {"info": {"classifiers": [], "name": "redis-py-cluster", "requires_python": null, "version": "1.2.0", "yanked": false}} @@ -187,9 +199,9 
@@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Typing :: Typed"], "name": "starlite", "requires_python": ">=3.8,<4.0", "version": "1.48.1", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Typing :: Typed"], "name": "starlite", "requires_python": "<4.0,>=3.8", "version": "1.51.16", "yanked": false}} {"info": {"classifiers": ["Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries"], "name": "statsig", "requires_python": ">=3.7", "version": "0.55.3", "yanked": false}} -{"info": {"classifiers": ["Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries"], "name": "statsig", "requires_python": ">=3.7", "version": "0.64.0", "yanked": false}} +{"info": {"classifiers": ["Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries"], "name": "statsig", "requires_python": ">=3.7", "version": "0.65.0", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": ">=3.8,<4.0", "version": "0.209.8", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": "<4.0,>=3.9", "version": "0.282.0", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": "<4.0,>=3.9", "version": "0.283.3", "yanked": false}} {"info": {"classifiers": ["License :: OSI 
Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "tornado", "requires_python": ">= 3.5", "version": "6.0.4", "yanked": false}} {"info": {"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "tornado", "requires_python": ">=3.9", "version": "6.5.2", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: No Input/Output (Daemon)", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License (GPL)", "Natural Language :: English", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Spanish", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": null, "version": "1.2.10", "yanked": false}} @@ -201,6 +213,6 @@ {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Framework :: Tryton", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Natural Language :: Bulgarian", "Natural Language :: Catalan", "Natural Language :: Chinese (Simplified)", "Natural Language :: Czech", "Natural Language :: Dutch", "Natural Language :: English", "Natural Language :: Finnish", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Hungarian", "Natural Language :: Indonesian", "Natural Language :: Italian", "Natural Language :: Persian", "Natural Language :: Polish", "Natural Language :: Portuguese (Brazilian)", "Natural Language :: Russian", "Natural Language :: Slovenian", "Natural Language :: Spanish", "Natural Language :: Turkish", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": ">=3.6", "version": "5.8.16", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Framework :: Tryton", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Natural Language :: Bulgarian", "Natural Language :: Catalan", "Natural Language :: Chinese (Simplified)", "Natural Language :: Czech", "Natural Language :: Dutch", "Natural Language :: English", "Natural Language :: Finnish", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Hungarian", 
"Natural Language :: Indonesian", "Natural Language :: Italian", "Natural Language :: Persian", "Natural Language :: Polish", "Natural Language :: Portuguese (Brazilian)", "Natural Language :: Romanian", "Natural Language :: Russian", "Natural Language :: Slovenian", "Natural Language :: Spanish", "Natural Language :: Turkish", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": ">=3.6", "version": "6.2.14", "yanked": false}} {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Framework :: Tryton", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Natural Language :: Bulgarian", "Natural Language :: Catalan", "Natural Language :: Chinese (Simplified)", "Natural Language :: Czech", "Natural Language :: Dutch", "Natural Language :: English", "Natural Language :: Finnish", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Hungarian", "Natural Language :: Indonesian", "Natural Language :: Italian", "Natural Language :: Persian", "Natural Language :: Polish", "Natural Language :: Portuguese (Brazilian)", "Natural Language :: Romanian", "Natural Language :: Russian", "Natural Language :: Slovenian", "Natural Language :: Spanish", "Natural Language :: Turkish", "Natural Language :: Ukrainian", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": ">=3.8", "version": "6.8.17", "yanked": false}} -{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Framework :: Tryton", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Natural Language :: Bulgarian", "Natural Language :: Catalan", "Natural Language :: Chinese (Simplified)", "Natural Language :: Czech", "Natural Language :: Dutch", "Natural Language :: English", "Natural Language :: Finnish", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Hungarian", "Natural Language :: Indonesian", "Natural Language :: Italian", "Natural Language :: Persian", "Natural Language :: Polish", "Natural Language :: Portuguese (Brazilian)", "Natural Language :: Romanian", "Natural Language :: Russian", "Natural Language :: Slovenian", "Natural Language :: Spanish", "Natural Language :: Turkish", "Natural Language :: Ukrainian", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: 
Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": ">=3.9", "version": "7.6.7", "yanked": false}} +{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: No Input/Output (Daemon)", "Framework :: Tryton", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Natural Language :: Bulgarian", "Natural Language :: Catalan", "Natural Language :: Chinese (Simplified)", "Natural Language :: Czech", "Natural Language :: Dutch", "Natural Language :: English", "Natural Language :: Finnish", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Hungarian", "Natural Language :: Indonesian", "Natural Language :: Italian", "Natural Language :: Persian", "Natural Language :: Polish", "Natural Language :: Portuguese (Brazilian)", "Natural Language :: Romanian", "Natural Language :: Russian", "Natural Language :: Slovenian", "Natural Language :: Spanish", "Natural Language :: Turkish", "Natural Language :: Ukrainian", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": ">=3.9", "version": "7.6.8", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "typer", "requires_python": ">=3.7", "version": "0.15.4", "yanked": false}} {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "typer", 
"requires_python": ">=3.8", "version": "0.19.2", "yanked": false}} diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index 2a33e7790d..b86da57c24 100755 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -1,14 +1,16 @@ -# Tox (http://codespeak.net/~hpk/tox/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. +# DON'T EDIT THIS FILE BY HAND. This file has been generated from a template by +# `scripts/populate_tox/populate_tox.py`. # -# This file has been generated from a template -# by "scripts/populate_tox/populate_tox.py". Any changes to the file should -# be made in the template (if you want to change a hardcoded part of the file) -# or in the script (if you want to change the auto-generated part). -# The file (and all resulting CI YAMLs) then needs to be regenerated via -# "scripts/generate-test-files.sh". +# Any changes to the test matrix should be made +# - either in the script config in `scripts/populate_tox/config.py` (if you want +# to change the auto-generated part) +# - or in the template in `scripts/populate_tox/tox.jinja` (if you want to change +# a hardcoded part of the file) +# +# This file (and all resulting CI YAMLs) then needs to be regenerated via +# `scripts/generate-test-files.sh`. +# +# See also `scripts/populate_tox/README.md` for more info. [tox] requires = @@ -179,7 +181,7 @@ basepython = # Python version is pinned here for consistency across environments. # Tools like ruff and mypy have options that pin the target Python # version (configured in pyproject.toml), ensuring consistent behavior. - linters: python3.12 + linters: python3.14 commands = {py3.7,py3.8}-boto3: pip install urllib3<2.0.0 diff --git a/scripts/split_tox_gh_actions/split_tox_gh_actions.py b/scripts/split_tox_gh_actions/split_tox_gh_actions.py index a7b7c394b1..9dea95842b 100755 --- a/scripts/split_tox_gh_actions/split_tox_gh_actions.py +++ b/scripts/split_tox_gh_actions/split_tox_gh_actions.py @@ -72,13 +72,15 @@ "AI": [ "anthropic", "cohere", + "google_genai", + "huggingface_hub", "langchain-base", "langchain-notiktoken", + "langgraph", + "litellm", "openai-base", "openai-notiktoken", - "langgraph", "openai_agents", - "huggingface_hub", ], "Cloud": [ "aws_lambda", diff --git a/sentry_sdk/_metrics.py b/sentry_sdk/_metrics.py new file mode 100644 index 0000000000..03bde137bd --- /dev/null +++ b/sentry_sdk/_metrics.py @@ -0,0 +1,81 @@ +""" +NOTE: This file contains experimental code that may be changed or removed at any +time without prior notice. +""" + +import time +from typing import Any, Optional, TYPE_CHECKING, Union + +import sentry_sdk +from sentry_sdk.utils import safe_repr + +if TYPE_CHECKING: + from sentry_sdk._types import Metric, MetricType + + +def _capture_metric( + name, # type: str + metric_type, # type: MetricType + value, # type: float + unit=None, # type: Optional[str] + attributes=None, # type: Optional[dict[str, Any]] +): + # type: (...) 
-> None + client = sentry_sdk.get_client() + + attrs = {} # type: dict[str, Union[str, bool, float, int]] + if attributes: + for k, v in attributes.items(): + attrs[k] = ( + v + if ( + isinstance(v, str) + or isinstance(v, int) + or isinstance(v, bool) + or isinstance(v, float) + ) + else safe_repr(v) + ) + + metric = { + "timestamp": time.time(), + "trace_id": None, + "span_id": None, + "name": name, + "type": metric_type, + "value": float(value), + "unit": unit, + "attributes": attrs, + } # type: Metric + + client._capture_metric(metric) + + +def count( + name, # type: str + value, # type: float + unit=None, # type: Optional[str] + attributes=None, # type: Optional[dict[str, Any]] +): + # type: (...) -> None + _capture_metric(name, "counter", value, unit, attributes) + + +def gauge( + name, # type: str + value, # type: float + unit=None, # type: Optional[str] + attributes=None, # type: Optional[dict[str, Any]] +): + # type: (...) -> None + _capture_metric(name, "gauge", value, unit, attributes) + + +def distribution( + name, # type: str + value, # type: float + unit=None, # type: Optional[str] + attributes=None, # type: Optional[dict[str, Any]] +): + # type: (...) -> None + _capture_metric(name, "distribution", value, unit, attributes) diff --git a/sentry_sdk/_metrics_batcher.py b/sentry_sdk/_metrics_batcher.py new file mode 100644 index 0000000000..fd9a5d732b --- /dev/null +++ b/sentry_sdk/_metrics_batcher.py @@ -0,0 +1,156 @@ +import os +import random +import threading +from datetime import datetime, timezone +from typing import Optional, List, Callable, TYPE_CHECKING, Any, Union + +from sentry_sdk.utils import format_timestamp, safe_repr +from sentry_sdk.envelope import Envelope, Item, PayloadRef + +if TYPE_CHECKING: + from sentry_sdk._types import Metric + + +class MetricsBatcher: + MAX_METRICS_BEFORE_FLUSH = 100 + FLUSH_WAIT_TIME = 5.0 + + def __init__( + self, + capture_func, # type: Callable[[Envelope], None] + ): + # type: (...) -> None + self._metric_buffer = [] # type: List[Metric] + self._capture_func = capture_func + self._running = True + self._lock = threading.Lock() + + self._flush_event = threading.Event() # type: threading.Event + + self._flusher = None # type: Optional[threading.Thread] + self._flusher_pid = None # type: Optional[int] + + def _ensure_thread(self): + # type: (...) -> bool + if not self._running: + return False + + pid = os.getpid() + if self._flusher_pid == pid: + return True + + with self._lock: + if self._flusher_pid == pid: + return True + + self._flusher_pid = pid + + self._flusher = threading.Thread(target=self._flush_loop) + self._flusher.daemon = True + + try: + self._flusher.start() + except RuntimeError: + self._running = False + return False + + return True + + def _flush_loop(self): + # type: (...) -> None + while self._running: + self._flush_event.wait(self.FLUSH_WAIT_TIME + random.random()) + self._flush_event.clear() + self._flush() + + def add( + self, + metric, # type: Metric + ): + # type: (...) -> None + if not self._ensure_thread() or self._flusher is None: + return None + + with self._lock: + self._metric_buffer.append(metric) + if len(self._metric_buffer) >= self.MAX_METRICS_BEFORE_FLUSH: + self._flush_event.set() + + def kill(self): + # type: (...) -> None + if self._flusher is None: + return + + self._running = False + self._flush_event.set() + self._flusher = None + + def flush(self): + # type: (...) 
-> None + self._flush() + + @staticmethod + def _metric_to_transport_format(metric): + # type: (Metric) -> Any + def format_attribute(val): + # type: (Union[int, float, str, bool]) -> Any + if isinstance(val, bool): + return {"value": val, "type": "boolean"} + if isinstance(val, int): + return {"value": val, "type": "integer"} + if isinstance(val, float): + return {"value": val, "type": "double"} + if isinstance(val, str): + return {"value": val, "type": "string"} + return {"value": safe_repr(val), "type": "string"} + + res = { + "timestamp": metric["timestamp"], + "trace_id": metric["trace_id"], + "name": metric["name"], + "type": metric["type"], + "value": metric["value"], + "attributes": { + k: format_attribute(v) for (k, v) in metric["attributes"].items() + }, + } + + if metric.get("span_id") is not None: + res["span_id"] = metric["span_id"] + + if metric.get("unit") is not None: + res["unit"] = metric["unit"] + + return res + + def _flush(self): + # type: (...) -> Optional[Envelope] + + envelope = Envelope( + headers={"sent_at": format_timestamp(datetime.now(timezone.utc))} + ) + with self._lock: + if len(self._metric_buffer) == 0: + return None + + envelope.add_item( + Item( + type="trace_metric", + content_type="application/vnd.sentry.items.trace-metric+json", + headers={ + "item_count": len(self._metric_buffer), + }, + payload=PayloadRef( + json={ + "items": [ + self._metric_to_transport_format(metric) + for metric in self._metric_buffer + ] + } + ), + ) + ) + self._metric_buffer.clear() + + self._capture_func(envelope) + return envelope diff --git a/sentry_sdk/_types.py b/sentry_sdk/_types.py index b28c7260ce..66ed7df4f7 100644 --- a/sentry_sdk/_types.py +++ b/sentry_sdk/_types.py @@ -210,7 +210,6 @@ class SDKInfo(TypedDict): "type": Literal["check_in", "transaction"], "user": dict[str, object], "_dropped_spans": int, - "_metrics_summary": dict[str, object], }, total=False, ) @@ -235,6 +234,32 @@ class SDKInfo(TypedDict): }, ) + MetricType = Literal["counter", "gauge", "distribution"] + + MetricAttributeValue = TypedDict( + "MetricAttributeValue", + { + "value": Union[str, bool, float, int], + "type": Literal["string", "boolean", "double", "integer"], + }, + ) + + Metric = TypedDict( + "Metric", + { + "timestamp": float, + "trace_id": Optional[str], + "span_id": Optional[str], + "name": str, + "type": MetricType, + "value": float, + "unit": Optional[str], + "attributes": dict[str, str | bool | float | int], + }, + ) + + MetricProcessor = Callable[[Metric, Hint], Optional[Metric]] + # TODO: Make a proper type definition for this (PRs welcome!) Breadcrumb = Dict[str, Any] @@ -266,36 +291,16 @@ class SDKInfo(TypedDict): "internal", "profile", "profile_chunk", - "metric_bucket", "monitor", "span", "log_item", + "trace_metric", ] SessionStatus = Literal["ok", "exited", "crashed", "abnormal"] ContinuousProfilerMode = Literal["thread", "gevent", "unknown"] ProfilerMode = Union[ContinuousProfilerMode, Literal["sleep"]] - # Type of the metric. - MetricType = Literal["d", "s", "g", "c"] - - # Value of the metric. - MetricValue = Union[int, float, str] - - # Internal representation of tags as a tuple of tuples (this is done in order to allow for the same key to exist - # multiple times). - MetricTagsInternal = Tuple[Tuple[str, str], ...] - - # External representation of tags as a dictionary. - MetricTagValue = Union[str, int, float, None] - MetricTags = Mapping[str, MetricTagValue] - - # Value inside the generator for the metric value. 
- FlushedMetricValue = Union[int, float] - - BucketKey = Tuple[MetricType, str, MeasurementUnit, MetricTagsInternal] - MetricMetaKey = Tuple[MetricType, str, MeasurementUnit] - MonitorConfigScheduleType = Literal["crontab", "interval"] MonitorConfigScheduleUnit = Literal[ "year", diff --git a/sentry_sdk/ai/__init__.py index e69de29bb2..fbcb9c061d 100644 --- a/sentry_sdk/ai/__init__.py +++ b/sentry_sdk/ai/__init__.py @@ -0,0 +1,7 @@ +from .utils import ( + set_data_normalized, + GEN_AI_MESSAGE_ROLE_MAPPING, + GEN_AI_MESSAGE_ROLE_REVERSE_MAPPING, + normalize_message_role, + normalize_message_roles, +) # noqa: F401 diff --git a/sentry_sdk/ai/utils.py index d0ccf1bed3..0c0b937006 100644 --- a/sentry_sdk/ai/utils.py +++ b/sentry_sdk/ai/utils.py @@ -10,6 +10,26 @@ from sentry_sdk.utils import logger +class GEN_AI_ALLOWED_MESSAGE_ROLES: + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + + +GEN_AI_MESSAGE_ROLE_REVERSE_MAPPING = { + GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM: ["system"], + GEN_AI_ALLOWED_MESSAGE_ROLES.USER: ["user", "human"], + GEN_AI_ALLOWED_MESSAGE_ROLES.ASSISTANT: ["assistant", "ai"], + GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL: ["tool", "tool_call"], +} + +GEN_AI_MESSAGE_ROLE_MAPPING = {} +for target_role, source_roles in GEN_AI_MESSAGE_ROLE_REVERSE_MAPPING.items(): + for source_role in source_roles: + GEN_AI_MESSAGE_ROLE_MAPPING[source_role] = target_role + + def _normalize_data(data, unpack=True): # type: (Any, bool) -> Any # convert pydantic data (e.g. OpenAI v1+) to json compatible format @@ -40,6 +60,34 @@ def set_data_normalized(span, key, value, unpack=True): span.set_data(key, json.dumps(normalized)) +def normalize_message_role(role): + # type: (str) -> str + """ + Normalize a message role to one of the 4 allowed gen_ai role values. + Maps "ai" -> "assistant" and keeps other standard roles unchanged. + """ + return GEN_AI_MESSAGE_ROLE_MAPPING.get(role, role) + + +def normalize_message_roles(messages): + # type: (list[dict[str, Any]]) -> list[dict[str, Any]] + """ + Normalize roles in a list of messages to use standard gen_ai role values. + Creates a shallow copy of each message to avoid modifying the originals.
+ """ + normalized_messages = [] + for message in messages: + if not isinstance(message, dict): + normalized_messages.append(message) + continue + normalized_message = message.copy() + if "role" in message: + normalized_message["role"] = normalize_message_role(message["role"]) + normalized_messages.append(normalized_message) + + return normalized_messages + + def get_start_span_function(): # type: () -> Callable[..., Any] current_span = sentry_sdk.get_current_span() diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index c06043ebe2..d17f922642 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -24,7 +24,9 @@ is_gevent, logger, get_before_send_log, + get_before_send_metric, has_logs_enabled, + has_metrics_enabled, ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace @@ -59,14 +61,14 @@ from typing import Union from typing import TypeVar - from sentry_sdk._types import Event, Hint, SDKInfo, Log + from sentry_sdk._types import Event, Hint, SDKInfo, Log, Metric from sentry_sdk.integrations import Integration - from sentry_sdk.metrics import MetricsAggregator from sentry_sdk.scope import Scope from sentry_sdk.session import Session from sentry_sdk.spotlight import SpotlightClient from sentry_sdk.transport import Transport from sentry_sdk._log_batcher import LogBatcher + from sentry_sdk._metrics_batcher import MetricsBatcher I = TypeVar("I", bound=Integration) # noqa: E741 @@ -182,8 +184,8 @@ def __init__(self, options=None): self.transport = None # type: Optional[Transport] self.monitor = None # type: Optional[Monitor] - self.metrics_aggregator = None # type: Optional[MetricsAggregator] self.log_batcher = None # type: Optional[LogBatcher] + self.metrics_batcher = None # type: Optional[MetricsBatcher] def __getstate__(self, *args, **kwargs): # type: (*Any, **Any) -> Any @@ -215,10 +217,14 @@ def capture_event(self, *args, **kwargs): # type: (*Any, **Any) -> Optional[str] return None - def _capture_experimental_log(self, log): + def _capture_log(self, log): # type: (Log) -> None pass + def _capture_metric(self, metric): + # type: (Metric) -> None + pass + def capture_session(self, *args, **kwargs): # type: (*Any, **Any) -> None return None @@ -361,26 +367,6 @@ def _capture_envelope(envelope): self.session_flusher = SessionFlusher(capture_func=_capture_envelope) - self.metrics_aggregator = None # type: Optional[MetricsAggregator] - experiments = self.options.get("_experiments", {}) - if experiments.get("enable_metrics", True): - # Context vars are not working correctly on Python <=3.6 - # with gevent. - metrics_supported = not is_gevent() or PY37 - if metrics_supported: - from sentry_sdk.metrics import MetricsAggregator - - self.metrics_aggregator = MetricsAggregator( - capture_func=_capture_envelope, - enable_code_locations=bool( - experiments.get("metric_code_locations", True) - ), - ) - else: - logger.info( - "Metrics not supported on Python 3.6 and lower with gevent." 
- ) - self.log_batcher = None if has_logs_enabled(self.options): @@ -388,6 +374,13 @@ def _capture_envelope(envelope): self.log_batcher = LogBatcher(capture_func=_capture_envelope) + self.metrics_batcher = None + + if has_metrics_enabled(self.options): + from sentry_sdk._metrics_batcher import MetricsBatcher + + self.metrics_batcher = MetricsBatcher(capture_func=_capture_envelope) + max_request_body_size = ("always", "never", "small", "medium") if self.options["max_request_body_size"] not in max_request_body_size: raise ValueError( @@ -467,7 +460,6 @@ def _capture_envelope(envelope): if ( self.monitor - or self.metrics_aggregator or self.log_batcher or has_profiling_enabled(self.options) or isinstance(self.transport, BaseHttpTransport) @@ -900,7 +892,7 @@ def capture_event( return return_value - def _capture_experimental_log(self, log): + def _capture_log(self, log): # type: (Optional[Log]) -> None if not has_logs_enabled(self.options) or log is None: return @@ -967,6 +959,65 @@ def _capture_experimental_log(self, log): if self.log_batcher: self.log_batcher.add(log) + def _capture_metric(self, metric): + # type: (Optional[Metric]) -> None + if not has_metrics_enabled(self.options) or metric is None: + return + + isolation_scope = sentry_sdk.get_isolation_scope() + + metric["attributes"]["sentry.sdk.name"] = SDK_INFO["name"] + metric["attributes"]["sentry.sdk.version"] = SDK_INFO["version"] + + environment = self.options.get("environment") + if environment is not None and "sentry.environment" not in metric["attributes"]: + metric["attributes"]["sentry.environment"] = environment + + release = self.options.get("release") + if release is not None and "sentry.release" not in metric["attributes"]: + metric["attributes"]["sentry.release"] = release + + span = sentry_sdk.get_current_span() + metric["trace_id"] = "00000000-0000-0000-0000-000000000000" + + if span: + metric["trace_id"] = span.trace_id + metric["span_id"] = span.span_id + else: + propagation_context = isolation_scope.get_active_propagation_context() + if propagation_context and propagation_context.trace_id: + metric["trace_id"] = propagation_context.trace_id + + if isolation_scope._user is not None: + for metric_attribute, user_attribute in ( + ("user.id", "id"), + ("user.name", "username"), + ("user.email", "email"), + ): + if ( + user_attribute in isolation_scope._user + and metric_attribute not in metric["attributes"] + ): + metric["attributes"][metric_attribute] = isolation_scope._user[ + user_attribute + ] + + debug = self.options.get("debug", False) + if debug: + logger.debug( + f"[Sentry Metrics] [{metric.get('type')}] {metric.get('name')}: {metric.get('value')}" + ) + + before_send_metric = get_before_send_metric(self.options) + if before_send_metric is not None: + metric = before_send_metric(metric, {}) + + if metric is None: + return + + if self.metrics_batcher: + self.metrics_batcher.add(metric) + def capture_session( self, session, # type: Session @@ -1019,10 +1070,10 @@ def close( if self.transport is not None: self.flush(timeout=timeout, callback=callback) self.session_flusher.kill() - if self.metrics_aggregator is not None: - self.metrics_aggregator.kill() if self.log_batcher is not None: self.log_batcher.kill() + if self.metrics_batcher is not None: + self.metrics_batcher.kill() if self.monitor: self.monitor.kill() self.transport.kill() @@ -1045,10 +1096,10 @@ def flush( if timeout is None: timeout = self.options["shutdown_timeout"] self.session_flusher.flush() - if self.metrics_aggregator is not None: - 
self.metrics_aggregator.flush() if self.log_batcher is not None: self.log_batcher.flush() + if self.metrics_batcher is not None: + self.metrics_batcher.flush() self.transport.flush(timeout=timeout, callback=callback) def __enter__(self): diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 5bcc487037..2a3c9411be 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -52,11 +52,10 @@ class CompressionAlgo(Enum): Hint, Log, MeasurementUnit, + Metric, ProfilerMode, TracesSampler, TransactionProcessor, - MetricTags, - MetricValue, ) # Experiments are feature flags to enable and disable certain unstable SDK @@ -77,13 +76,10 @@ class CompressionAlgo(Enum): "transport_compression_algo": Optional[CompressionAlgo], "transport_num_pools": Optional[int], "transport_http2": Optional[bool], - "enable_metrics": Optional[bool], - "before_emit_metric": Optional[ - Callable[[str, MetricValue, MeasurementUnit, MetricTags], bool] - ], - "metric_code_locations": Optional[bool], "enable_logs": Optional[bool], "before_send_log": Optional[Callable[[Log, Hint], Optional[Log]]], + "enable_metrics": Optional[bool], + "before_send_metric": Optional[Callable[[Metric, Hint], Optional[Metric]]], }, total=False, ) @@ -839,6 +835,7 @@ class OP: QUEUE_TASK_HUEY = "queue.task.huey" QUEUE_SUBMIT_RAY = "queue.submit.ray" QUEUE_TASK_RAY = "queue.task.ray" + QUEUE_TASK_DRAMATIQ = "queue.task.dramatiq" SUBPROCESS = "subprocess" SUBPROCESS_WAIT = "subprocess.wait" SUBPROCESS_COMMUNICATE = "subprocess.communicate" @@ -912,6 +909,8 @@ def __init__( error_sampler=None, # type: Optional[Callable[[Event, Hint], Union[float, bool]]] enable_db_query_source=True, # type: bool db_query_source_threshold_ms=100, # type: int + enable_http_request_source=False, # type: bool + http_request_source_threshold_ms=100, # type: int spotlight=None, # type: Optional[Union[bool, str]] cert_file=None, # type: Optional[str] key_file=None, # type: Optional[str] @@ -1267,6 +1266,13 @@ def __init__( The query location will be added to the query for queries slower than the specified threshold. + :param enable_http_request_source: When enabled, the source location will be added to outgoing HTTP requests. + + :param http_request_source_threshold_ms: The threshold in milliseconds for adding the source location to an + outgoing HTTP request. + + The request location will be added to the request for requests slower than the specified threshold. + :param custom_repr: A custom `repr `_ function to run while serializing an object. @@ -1342,4 +1348,4 @@ def _get_default_options(): del _get_default_options -VERSION = "2.39.0" +VERSION = "2.42.0" diff --git a/sentry_sdk/envelope.py b/sentry_sdk/envelope.py index d9b2c1629a..56bb5fde73 100644 --- a/sentry_sdk/envelope.py +++ b/sentry_sdk/envelope.py @@ -285,14 +285,14 @@ def data_category(self): return "error" elif ty == "log": return "log_item" + elif ty == "trace_metric": + return "trace_metric" elif ty == "client_report": return "internal" elif ty == "profile": return "profile" elif ty == "profile_chunk": return "profile_chunk" - elif ty == "statsd": - return "metric_bucket" elif ty == "check_in": return "monitor" else: @@ -354,7 +354,7 @@ def deserialize_from( # if no length was specified we need to read up to the end of line # and remove it (if it is present, i.e. 
not the very last char in an eof terminated envelope) payload = f.readline().rstrip(b"\n") - if headers.get("type") in ("event", "transaction", "metric_buckets"): + if headers.get("type") in ("event", "transaction"): rv = cls(headers=headers, payload=PayloadRef(json=parse_json(payload))) else: rv = cls(headers=headers, payload=payload) diff --git a/sentry_sdk/integrations/__init__.py b/sentry_sdk/integrations/__init__.py index e397c9986a..9e279b8345 100644 --- a/sentry_sdk/integrations/__init__.py +++ b/sentry_sdk/integrations/__init__.py @@ -140,12 +140,14 @@ def iter_default_integrations(with_auto_enabling_integrations): "flask": (1, 1, 4), "gql": (3, 4, 1), "graphene": (3, 3), + "google_genai": (1, 29, 0), # google-genai "grpc": (1, 32, 0), # grpcio "httpx": (0, 16, 0), "huggingface_hub": (0, 24, 7), "langchain": (0, 1, 0), "langgraph": (0, 6, 6), "launchdarkly": (9, 8, 0), + "litellm": (1, 77, 5), "loguru": (0, 7, 0), "openai": (1, 0, 0), "openai_agents": (0, 0, 19), diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py index ad3202bf2c..0a417f8dc4 100644 --- a/sentry_sdk/integrations/aiohttp.py +++ b/sentry_sdk/integrations/aiohttp.py @@ -22,7 +22,7 @@ SOURCE_FOR_STYLE, TransactionSource, ) -from sentry_sdk.tracing_utils import should_propagate_trace +from sentry_sdk.tracing_utils import should_propagate_trace, add_http_request_source from sentry_sdk.utils import ( capture_internal_exceptions, ensure_integration_enabled, @@ -279,6 +279,9 @@ async def on_request_end(session, trace_config_ctx, params): span.set_data("reason", params.response.reason) span.finish() + with capture_internal_exceptions(): + add_http_request_source(span) + trace_config = TraceConfig() trace_config.on_request_start.append(on_request_start) diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index d9898fa1d1..46c6b2a766 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -3,7 +3,11 @@ import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage -from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function +from sentry_sdk.ai.utils import ( + set_data_normalized, + normalize_message_roles, + get_start_span_function, +) from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -140,8 +144,12 @@ def _set_input_data(span, kwargs, integration): else: normalized_messages.append(message) + role_normalized_messages = normalize_message_roles(normalized_messages) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + role_normalized_messages, + unpack=False, ) set_data_normalized( diff --git a/sentry_sdk/integrations/dramatiq.py b/sentry_sdk/integrations/dramatiq.py index a756b4c669..8b85831cf4 100644 --- a/sentry_sdk/integrations/dramatiq.py +++ b/sentry_sdk/integrations/dramatiq.py @@ -1,18 +1,31 @@ import json import sentry_sdk -from sentry_sdk.integrations import Integration +from sentry_sdk.consts import OP, SPANSTATUS +from sentry_sdk.api import continue_trace, get_baggage, get_traceparent +from sentry_sdk.integrations import Integration, DidNotEnable from sentry_sdk.integrations._wsgi_common import request_body_within_bounds +from sentry_sdk.tracing import ( + BAGGAGE_HEADER_NAME, + SENTRY_TRACE_HEADER_NAME, + TransactionSource, +) from sentry_sdk.utils 
import ( AnnotatedValue, capture_internal_exceptions, event_from_exception, ) +from typing import TypeVar + +R = TypeVar("R") -from dramatiq.broker import Broker # type: ignore -from dramatiq.message import Message # type: ignore -from dramatiq.middleware import Middleware, default_middleware # type: ignore -from dramatiq.errors import Retry # type: ignore +try: + from dramatiq.broker import Broker + from dramatiq.middleware import Middleware, default_middleware + from dramatiq.errors import Retry + from dramatiq.message import Message +except ImportError: + raise DidNotEnable("Dramatiq is not installed") from typing import TYPE_CHECKING @@ -34,10 +47,12 @@ class DramatiqIntegration(Integration): """ identifier = "dramatiq" + origin = f"auto.queue.{identifier}" @staticmethod def setup_once(): # type: () -> None + _patch_dramatiq_broker() @@ -85,22 +100,54 @@ class SentryMiddleware(Middleware): # type: ignore[misc] DramatiqIntegration. """ - def before_process_message(self, broker, message): - # type: (Broker, Message) -> None + SENTRY_HEADERS_NAME = "_sentry_headers" + + def before_enqueue(self, broker, message, delay): + # type: (Broker, Message[R], int) -> None integration = sentry_sdk.get_client().get_integration(DramatiqIntegration) if integration is None: return - message._scope_manager = sentry_sdk.new_scope() - message._scope_manager.__enter__() + message.options[self.SENTRY_HEADERS_NAME] = { + BAGGAGE_HEADER_NAME: get_baggage(), + SENTRY_TRACE_HEADER_NAME: get_traceparent(), + } + + def before_process_message(self, broker, message): + # type: (Broker, Message[R]) -> None + integration = sentry_sdk.get_client().get_integration(DramatiqIntegration) + if integration is None: + return - scope = sentry_sdk.get_current_scope() - scope.set_transaction_name(message.actor_name) + message._scope_manager = sentry_sdk.isolation_scope() + scope = message._scope_manager.__enter__() + scope.clear_breadcrumbs() scope.set_extra("dramatiq_message_id", message.message_id) scope.add_event_processor(_make_message_event_processor(message, integration)) + sentry_headers = message.options.get(self.SENTRY_HEADERS_NAME) or {} + if "retries" in message.options: + # start new trace in case of retrying + sentry_headers = {} + + transaction = continue_trace( + sentry_headers, + name=message.actor_name, + op=OP.QUEUE_TASK_DRAMATIQ, + source=TransactionSource.TASK, + origin=DramatiqIntegration.origin, + ) + transaction.set_status(SPANSTATUS.OK) + sentry_sdk.start_transaction( + transaction, + name=message.actor_name, + op=OP.QUEUE_TASK_DRAMATIQ, + source=TransactionSource.TASK, + ) + transaction.__enter__() + def after_process_message(self, broker, message, *, result=None, exception=None): - # type: (Broker, Message, Any, Optional[Any], Optional[Exception]) -> None + # type: (Broker, Message[R], Optional[Any], Optional[Exception]) -> None integration = sentry_sdk.get_client().get_integration(DramatiqIntegration) if integration is None: return @@ -108,27 +155,38 @@ def after_process_message(self, broker, message, *, result=None, exception=None) actor = broker.get_actor(message.actor_name) throws = message.options.get("throws") or actor.options.get("throws") - try: - if ( - exception is not None - and not (throws and isinstance(exception, throws)) - and not isinstance(exception, Retry) - ): - event, hint = event_from_exception( - exception, - client_options=sentry_sdk.get_client().options, - mechanism={ - "type": DramatiqIntegration.identifier, - "handled": False, - }, - ) - sentry_sdk.capture_event(event, 
hint=hint) - finally: - message._scope_manager.__exit__(None, None, None) + scope_manager = message._scope_manager + transaction = sentry_sdk.get_current_scope().transaction + if not transaction: + return None + + is_event_capture_required = ( + exception is not None + and not (throws and isinstance(exception, throws)) + and not isinstance(exception, Retry) + ) + if not is_event_capture_required: + # normal transaction finish + transaction.__exit__(None, None, None) + scope_manager.__exit__(None, None, None) + return + + event, hint = event_from_exception( + exception, # type: ignore[arg-type] + client_options=sentry_sdk.get_client().options, + mechanism={ + "type": DramatiqIntegration.identifier, + "handled": False, + }, + ) + sentry_sdk.capture_event(event, hint=hint) + # transaction error + transaction.__exit__(type(exception), exception, None) + scope_manager.__exit__(type(exception), exception, None) def _make_message_event_processor(message, integration): - # type: (Message, DramatiqIntegration) -> Callable[[Event, Hint], Optional[Event]] + # type: (Message[R], DramatiqIntegration) -> Callable[[Event, Hint], Optional[Event]] def inner(event, hint): # type: (Event, Hint) -> Optional[Event] @@ -142,7 +200,7 @@ def inner(event, hint): class DramatiqMessageExtractor: def __init__(self, message): - # type: (Message) -> None + # type: (Message[R]) -> None self.message_data = dict(message.asdict()) def content_length(self): diff --git a/sentry_sdk/integrations/google_genai/__init__.py b/sentry_sdk/integrations/google_genai/__init__.py new file mode 100644 index 0000000000..7175b64340 --- /dev/null +++ b/sentry_sdk/integrations/google_genai/__init__.py @@ -0,0 +1,298 @@ +from functools import wraps +from typing import ( + Any, + AsyncIterator, + Callable, + Iterator, + List, +) + +import sentry_sdk +from sentry_sdk.ai.utils import get_start_span_function +from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.tracing import SPANSTATUS + + +try: + from google.genai.models import Models, AsyncModels +except ImportError: + raise DidNotEnable("google-genai not installed") + + +from .consts import IDENTIFIER, ORIGIN, GEN_AI_SYSTEM +from .utils import ( + set_span_data_for_request, + set_span_data_for_response, + _capture_exception, + prepare_generate_content_args, +) +from .streaming import ( + set_span_data_for_streaming_response, + accumulate_streaming_response, +) + + +class GoogleGenAIIntegration(Integration): + identifier = IDENTIFIER + origin = ORIGIN + + def __init__(self, include_prompts=True): + # type: (GoogleGenAIIntegration, bool) -> None + self.include_prompts = include_prompts + + @staticmethod + def setup_once(): + # type: () -> None + # Patch sync methods + Models.generate_content = _wrap_generate_content(Models.generate_content) + Models.generate_content_stream = _wrap_generate_content_stream( + Models.generate_content_stream + ) + + # Patch async methods + AsyncModels.generate_content = _wrap_async_generate_content( + AsyncModels.generate_content + ) + AsyncModels.generate_content_stream = _wrap_async_generate_content_stream( + AsyncModels.generate_content_stream + ) + + +def _wrap_generate_content_stream(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + @wraps(f) + def new_generate_content_stream(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration) + if integration is None: + return f(self, *args, **kwargs) + + _model, 
contents, model_name = prepare_generate_content_args(args, kwargs) + + span = get_start_span_function()( + op=OP.GEN_AI_INVOKE_AGENT, + name="invoke_agent", + origin=ORIGIN, + ) + span.__enter__() + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name) + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + set_span_data_for_request(span, integration, model_name, contents, kwargs) + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + chat_span = sentry_sdk.start_span( + op=OP.GEN_AI_CHAT, + name=f"chat {model_name}", + origin=ORIGIN, + ) + chat_span.__enter__() + chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name) + set_span_data_for_request(chat_span, integration, model_name, contents, kwargs) + chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + try: + stream = f(self, *args, **kwargs) + + # Create wrapper iterator to accumulate responses + def new_iterator(): + # type: () -> Iterator[Any] + chunks = [] # type: List[Any] + try: + for chunk in stream: + chunks.append(chunk) + yield chunk + except Exception as exc: + _capture_exception(exc) + chat_span.set_status(SPANSTATUS.ERROR) + raise + finally: + # Accumulate all chunks and set final response data on spans + if chunks: + accumulated_response = accumulate_streaming_response(chunks) + set_span_data_for_streaming_response( + chat_span, integration, accumulated_response + ) + set_span_data_for_streaming_response( + span, integration, accumulated_response + ) + chat_span.__exit__(None, None, None) + span.__exit__(None, None, None) + + return new_iterator() + + except Exception as exc: + _capture_exception(exc) + chat_span.__exit__(None, None, None) + span.__exit__(None, None, None) + raise + + return new_generate_content_stream + + +def _wrap_async_generate_content_stream(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + @wraps(f) + async def new_async_generate_content_stream(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration) + if integration is None: + return await f(self, *args, **kwargs) + + _model, contents, model_name = prepare_generate_content_args(args, kwargs) + + span = get_start_span_function()( + op=OP.GEN_AI_INVOKE_AGENT, + name="invoke_agent", + origin=ORIGIN, + ) + span.__enter__() + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name) + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + set_span_data_for_request(span, integration, model_name, contents, kwargs) + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + chat_span = sentry_sdk.start_span( + op=OP.GEN_AI_CHAT, + name=f"chat {model_name}", + origin=ORIGIN, + ) + chat_span.__enter__() + chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name) + set_span_data_for_request(chat_span, integration, model_name, contents, kwargs) + chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + try: + stream = await f(self, *args, **kwargs) + + # Create wrapper async iterator to accumulate responses + async def new_async_iterator(): + # type: () -> AsyncIterator[Any] + chunks = [] # type: List[Any] + try: + async for chunk in stream: + chunks.append(chunk) + yield chunk + except Exception as exc: + _capture_exception(exc) + chat_span.set_status(SPANSTATUS.ERROR) + raise + 
finally: + # Accumulate all chunks and set final response data on spans + if chunks: + accumulated_response = accumulate_streaming_response(chunks) + set_span_data_for_streaming_response( + chat_span, integration, accumulated_response + ) + set_span_data_for_streaming_response( + span, integration, accumulated_response + ) + chat_span.__exit__(None, None, None) + span.__exit__(None, None, None) + + return new_async_iterator() + + except Exception as exc: + _capture_exception(exc) + chat_span.__exit__(None, None, None) + span.__exit__(None, None, None) + raise + + return new_async_generate_content_stream + + +def _wrap_generate_content(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + @wraps(f) + def new_generate_content(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration) + if integration is None: + return f(self, *args, **kwargs) + + model, contents, model_name = prepare_generate_content_args(args, kwargs) + + with get_start_span_function()( + op=OP.GEN_AI_INVOKE_AGENT, + name="invoke_agent", + origin=ORIGIN, + ) as span: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name) + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + set_span_data_for_request(span, integration, model_name, contents, kwargs) + + with sentry_sdk.start_span( + op=OP.GEN_AI_CHAT, + name=f"chat {model_name}", + origin=ORIGIN, + ) as chat_span: + chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name) + set_span_data_for_request( + chat_span, integration, model_name, contents, kwargs + ) + + try: + response = f(self, *args, **kwargs) + except Exception as exc: + _capture_exception(exc) + chat_span.set_status(SPANSTATUS.ERROR) + raise + + set_span_data_for_response(chat_span, integration, response) + set_span_data_for_response(span, integration, response) + + return response + + return new_generate_content + + +def _wrap_async_generate_content(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + @wraps(f) + async def new_async_generate_content(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration) + if integration is None: + return await f(self, *args, **kwargs) + + model, contents, model_name = prepare_generate_content_args(args, kwargs) + + with get_start_span_function()( + op=OP.GEN_AI_INVOKE_AGENT, + name="invoke_agent", + origin=ORIGIN, + ) as span: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name) + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + set_span_data_for_request(span, integration, model_name, contents, kwargs) + + with sentry_sdk.start_span( + op=OP.GEN_AI_CHAT, + name=f"chat {model_name}", + origin=ORIGIN, + ) as chat_span: + chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name) + set_span_data_for_request( + chat_span, integration, model_name, contents, kwargs + ) + try: + response = await f(self, *args, **kwargs) + except Exception as exc: + _capture_exception(exc) + chat_span.set_status(SPANSTATUS.ERROR) + raise + + set_span_data_for_response(chat_span, integration, response) + set_span_data_for_response(span, integration, response) + + return response + + return new_async_generate_content diff --git 
a/sentry_sdk/integrations/google_genai/consts.py b/sentry_sdk/integrations/google_genai/consts.py new file mode 100644 index 0000000000..5b53ebf0e2 --- /dev/null +++ b/sentry_sdk/integrations/google_genai/consts.py @@ -0,0 +1,16 @@ +GEN_AI_SYSTEM = "gcp.gemini" + +# Mapping of tool attributes to their descriptions +# These are all tools that are available in the Google GenAI API +TOOL_ATTRIBUTES_MAP = { + "google_search_retrieval": "Google Search retrieval tool", + "google_search": "Google Search tool", + "retrieval": "Retrieval tool", + "enterprise_web_search": "Enterprise web search tool", + "google_maps": "Google Maps tool", + "code_execution": "Code execution tool", + "computer_use": "Computer use tool", +} + +IDENTIFIER = "google_genai" +ORIGIN = f"auto.ai.{IDENTIFIER}" diff --git a/sentry_sdk/integrations/google_genai/streaming.py b/sentry_sdk/integrations/google_genai/streaming.py new file mode 100644 index 0000000000..03d09aadf6 --- /dev/null +++ b/sentry_sdk/integrations/google_genai/streaming.py @@ -0,0 +1,155 @@ +from typing import ( + TYPE_CHECKING, + Any, + List, + TypedDict, + Optional, +) + +from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.consts import SPANDATA +from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.utils import ( + safe_serialize, +) +from .utils import ( + extract_tool_calls, + extract_finish_reasons, + extract_contents_text, + extract_usage_data, + UsageData, +) + +if TYPE_CHECKING: + from sentry_sdk.tracing import Span + from google.genai.types import GenerateContentResponse + + +class AccumulatedResponse(TypedDict): + id: Optional[str] + model: Optional[str] + text: str + finish_reasons: List[str] + tool_calls: List[dict[str, Any]] + usage_metadata: UsageData + + +def accumulate_streaming_response(chunks): + # type: (List[GenerateContentResponse]) -> AccumulatedResponse + """Accumulate streaming chunks into a single response-like object.""" + accumulated_text = [] + finish_reasons = [] + tool_calls = [] + total_input_tokens = 0 + total_output_tokens = 0 + total_tokens = 0 + total_cached_tokens = 0 + total_reasoning_tokens = 0 + response_id = None + model = None + + for chunk in chunks: + # Extract text and tool calls + if getattr(chunk, "candidates", None): + for candidate in getattr(chunk, "candidates", []): + if hasattr(candidate, "content") and getattr( + candidate.content, "parts", [] + ): + extracted_text = extract_contents_text(candidate.content) + if extracted_text: + accumulated_text.append(extracted_text) + + extracted_finish_reasons = extract_finish_reasons(chunk) + if extracted_finish_reasons: + finish_reasons.extend(extracted_finish_reasons) + + extracted_tool_calls = extract_tool_calls(chunk) + if extracted_tool_calls: + tool_calls.extend(extracted_tool_calls) + + # Accumulate token usage + extracted_usage_data = extract_usage_data(chunk) + total_input_tokens += extracted_usage_data["input_tokens"] + total_output_tokens += extracted_usage_data["output_tokens"] + total_cached_tokens += extracted_usage_data["input_tokens_cached"] + total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"] + total_tokens += extracted_usage_data["total_tokens"] + + accumulated_response = AccumulatedResponse( + text="".join(accumulated_text), + finish_reasons=finish_reasons, + tool_calls=tool_calls, + usage_metadata=UsageData( + input_tokens=total_input_tokens, + output_tokens=total_output_tokens, + input_tokens_cached=total_cached_tokens, + output_tokens_reasoning=total_reasoning_tokens, + 
total_tokens=total_tokens, + ), + id=response_id, + model=model, + ) + + return accumulated_response + + +def set_span_data_for_streaming_response(span, integration, accumulated_response): + # type: (Span, Any, AccumulatedResponse) -> None + """Set span data for accumulated streaming response.""" + if ( + should_send_default_pii() + and integration.include_prompts + and accumulated_response.get("text") + ): + span.set_data( + SPANDATA.GEN_AI_RESPONSE_TEXT, + safe_serialize([accumulated_response["text"]]), + ) + + if accumulated_response.get("finish_reasons"): + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + accumulated_response["finish_reasons"], + ) + + if accumulated_response.get("tool_calls"): + span.set_data( + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + safe_serialize(accumulated_response["tool_calls"]), + ) + + if accumulated_response.get("id"): + span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, accumulated_response["id"]) + if accumulated_response.get("model"): + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, accumulated_response["model"]) + + if accumulated_response["usage_metadata"]["input_tokens"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, + accumulated_response["usage_metadata"]["input_tokens"], + ) + + if accumulated_response["usage_metadata"]["input_tokens_cached"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + accumulated_response["usage_metadata"]["input_tokens_cached"], + ) + + if accumulated_response["usage_metadata"]["output_tokens"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, + accumulated_response["usage_metadata"]["output_tokens"], + ) + + if accumulated_response["usage_metadata"]["output_tokens_reasoning"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + accumulated_response["usage_metadata"]["output_tokens_reasoning"], + ) + + if accumulated_response["usage_metadata"]["total_tokens"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, + accumulated_response["usage_metadata"]["total_tokens"], + ) diff --git a/sentry_sdk/integrations/google_genai/utils.py b/sentry_sdk/integrations/google_genai/utils.py new file mode 100644 index 0000000000..ff973b02d9 --- /dev/null +++ b/sentry_sdk/integrations/google_genai/utils.py @@ -0,0 +1,566 @@ +import copy +import inspect +from functools import wraps +from .consts import ORIGIN, TOOL_ATTRIBUTES_MAP, GEN_AI_SYSTEM +from typing import ( + cast, + TYPE_CHECKING, + Iterable, + Any, + Callable, + List, + Optional, + Union, + TypedDict, +) + +import sentry_sdk +from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.utils import ( + capture_internal_exceptions, + event_from_exception, + safe_serialize, +) +from google.genai.types import GenerateContentConfig + +if TYPE_CHECKING: + from sentry_sdk.tracing import Span + from google.genai.types import ( + GenerateContentResponse, + ContentListUnion, + Tool, + Model, + ) + + +class UsageData(TypedDict): + """Structure for token usage data.""" + + input_tokens: int + input_tokens_cached: int + output_tokens: int + output_tokens_reasoning: int + total_tokens: int + + +def extract_usage_data(response): + # type: (Union[GenerateContentResponse, dict[str, Any]]) -> UsageData + """Extract usage data from response into a structured format. 
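Worth pausing on the token arithmetic that `accumulate_streaming_response` relies on: `extract_usage_data` (whose dict branch, used for streaming chunks, follows below) folds tool-use prompt tokens into the input count and treats reasoning tokens as a sub-category of output tokens. A worked sketch with illustrative counts:

```python
from sentry_sdk.integrations.google_genai.utils import extract_usage_data

chunk = {
    "usage_metadata": {
        "prompt_token_count": 10,
        "tool_use_prompt_token_count": 2,
        "cached_content_token_count": 4,
        "thoughts_token_count": 3,
        "candidates_token_count": 20,
        "total_token_count": 35,
    }
}

usage = extract_usage_data(chunk)
assert usage["input_tokens"] == 12            # prompt + tool-use prompt
assert usage["input_tokens_cached"] == 4
assert usage["output_tokens_reasoning"] == 3
assert usage["output_tokens"] == 23           # candidates + reasoning
assert usage["total_tokens"] == 35
```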
+ + Args: + response: The GenerateContentResponse object or dictionary containing usage metadata + + Returns: + UsageData: Dictionary with input_tokens, input_tokens_cached, + output_tokens, and output_tokens_reasoning fields + """ + usage_data = UsageData( + input_tokens=0, + input_tokens_cached=0, + output_tokens=0, + output_tokens_reasoning=0, + total_tokens=0, + ) + + # Handle dictionary response (from streaming) + if isinstance(response, dict): + usage = response.get("usage_metadata", {}) + if not usage: + return usage_data + + prompt_tokens = usage.get("prompt_token_count", 0) or 0 + tool_use_prompt_tokens = usage.get("tool_use_prompt_token_count", 0) or 0 + usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens + + cached_tokens = usage.get("cached_content_token_count", 0) or 0 + usage_data["input_tokens_cached"] = cached_tokens + + reasoning_tokens = usage.get("thoughts_token_count", 0) or 0 + usage_data["output_tokens_reasoning"] = reasoning_tokens + + candidates_tokens = usage.get("candidates_token_count", 0) or 0 + # python-genai reports output and reasoning tokens separately + # reasoning should be sub-category of output tokens + usage_data["output_tokens"] = candidates_tokens + reasoning_tokens + + total_tokens = usage.get("total_token_count", 0) or 0 + usage_data["total_tokens"] = total_tokens + + return usage_data + + if not hasattr(response, "usage_metadata"): + return usage_data + + usage = response.usage_metadata + + # Input tokens include both prompt and tool use prompt tokens + prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0 + tool_use_prompt_tokens = getattr(usage, "tool_use_prompt_token_count", 0) or 0 + usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens + + # Cached input tokens + cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0 + usage_data["input_tokens_cached"] = cached_tokens + + # Reasoning tokens + reasoning_tokens = getattr(usage, "thoughts_token_count", 0) or 0 + usage_data["output_tokens_reasoning"] = reasoning_tokens + + # output_tokens = candidates_tokens + reasoning_tokens + # google-genai reports output and reasoning tokens separately + candidates_tokens = getattr(usage, "candidates_token_count", 0) or 0 + usage_data["output_tokens"] = candidates_tokens + reasoning_tokens + + total_tokens = getattr(usage, "total_token_count", 0) or 0 + usage_data["total_tokens"] = total_tokens + + return usage_data + + +def _capture_exception(exc): + # type: (Any) -> None + """Capture exception with Google GenAI mechanism.""" + event, hint = event_from_exception( + exc, + client_options=sentry_sdk.get_client().options, + mechanism={"type": "google_genai", "handled": False}, + ) + sentry_sdk.capture_event(event, hint=hint) + + +def get_model_name(model): + # type: (Union[str, Model]) -> str + """Extract model name from model parameter.""" + if isinstance(model, str): + return model + # Handle case where model might be an object with a name attribute + if hasattr(model, "name"): + return str(model.name) + return str(model) + + +def extract_contents_text(contents): + # type: (ContentListUnion) -> Optional[str] + """Extract text from contents parameter which can have various formats.""" + if contents is None: + return None + + # Simple string case + if isinstance(contents, str): + return contents + + # List of contents or parts + if isinstance(contents, list): + texts = [] + for item in contents: + # Recursively extract text from each item + extracted = extract_contents_text(item) + if extracted: + 
texts.append(extracted) + return " ".join(texts) if texts else None + + # Dictionary case + if isinstance(contents, dict): + if "text" in contents: + return contents["text"] + # Try to extract from parts if present in dict + if "parts" in contents: + return extract_contents_text(contents["parts"]) + + # Content object with parts - recurse into parts + if getattr(contents, "parts", None): + return extract_contents_text(contents.parts) + + # Direct text attribute + if hasattr(contents, "text"): + return contents.text + + return None + + +def _format_tools_for_span(tools): + # type: (Iterable[Tool | Callable[..., Any]]) -> Optional[List[dict[str, Any]]] + """Format tools parameter for span data.""" + formatted_tools = [] + for tool in tools: + if callable(tool): + # Handle callable functions passed directly + formatted_tools.append( + { + "name": getattr(tool, "__name__", "unknown"), + "description": getattr(tool, "__doc__", None), + } + ) + elif ( + hasattr(tool, "function_declarations") + and tool.function_declarations is not None + ): + # Tool object with function declarations + for func_decl in tool.function_declarations: + formatted_tools.append( + { + "name": getattr(func_decl, "name", None), + "description": getattr(func_decl, "description", None), + } + ) + else: + # Check for predefined tool attributes - each of these tools + # is an attribute of the tool object, by default set to None + for attr_name, description in TOOL_ATTRIBUTES_MAP.items(): + if getattr(tool, attr_name, None): + formatted_tools.append( + { + "name": attr_name, + "description": description, + } + ) + break + + return formatted_tools if formatted_tools else None + + +def extract_tool_calls(response): + # type: (GenerateContentResponse) -> Optional[List[dict[str, Any]]] + """Extract tool/function calls from response candidates and automatic function calling history.""" + + tool_calls = [] + + # Extract from candidates, sometimes tool calls are nested under the content.parts object + if getattr(response, "candidates", []): + for candidate in response.candidates: + if not hasattr(candidate, "content") or not getattr( + candidate.content, "parts", [] + ): + continue + + for part in candidate.content.parts: + if getattr(part, "function_call", None): + function_call = part.function_call + tool_call = { + "name": getattr(function_call, "name", None), + "type": "function_call", + } + + # Extract arguments if available + if getattr(function_call, "args", None): + tool_call["arguments"] = safe_serialize(function_call.args) + + tool_calls.append(tool_call) + + # Extract from automatic_function_calling_history + # This is the history of tool calls made by the model + if getattr(response, "automatic_function_calling_history", None): + for content in response.automatic_function_calling_history: + if not getattr(content, "parts", None): + continue + + for part in getattr(content, "parts", []): + if getattr(part, "function_call", None): + function_call = part.function_call + tool_call = { + "name": getattr(function_call, "name", None), + "type": "function_call", + } + + # Extract arguments if available + if hasattr(function_call, "args"): + tool_call["arguments"] = safe_serialize(function_call.args) + + tool_calls.append(tool_call) + + return tool_calls if tool_calls else None + + +def _capture_tool_input(args, kwargs, tool): + # type: (tuple[Any, ...], dict[str, Any], Tool) -> dict[str, Any] + """Capture tool input from args and kwargs.""" + tool_input = kwargs.copy() if kwargs else {} + + # If we have positional args, try to 
map them to the function signature + if args: + try: + sig = inspect.signature(tool) + param_names = list(sig.parameters.keys()) + for i, arg in enumerate(args): + if i < len(param_names): + tool_input[param_names[i]] = arg + except Exception: + # Fallback if we can't get the signature + tool_input["args"] = args + + return tool_input + + +def _create_tool_span(tool_name, tool_doc): + # type: (str, Optional[str]) -> Span + """Create a span for tool execution.""" + span = sentry_sdk.start_span( + op=OP.GEN_AI_EXECUTE_TOOL, + name=f"execute_tool {tool_name}", + origin=ORIGIN, + ) + span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name) + span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function") + if tool_doc: + span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_doc) + return span + + +def wrapped_tool(tool): + # type: (Tool | Callable[..., Any]) -> Tool | Callable[..., Any] + """Wrap a tool to emit execute_tool spans when called.""" + if not callable(tool): + # Not a callable function, return as-is (predefined tools) + return tool + + tool_name = getattr(tool, "__name__", "unknown") + tool_doc = tool.__doc__ + + if inspect.iscoroutinefunction(tool): + # Async function + @wraps(tool) + async def async_wrapped(*args, **kwargs): + # type: (Any, Any) -> Any + with _create_tool_span(tool_name, tool_doc) as span: + # Capture tool input + tool_input = _capture_tool_input(args, kwargs, tool) + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input) + ) + + try: + result = await tool(*args, **kwargs) + + # Capture tool output + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result) + ) + + return result + except Exception as exc: + _capture_exception(exc) + raise + + return async_wrapped + else: + # Sync function + @wraps(tool) + def sync_wrapped(*args, **kwargs): + # type: (Any, Any) -> Any + with _create_tool_span(tool_name, tool_doc) as span: + # Capture tool input + tool_input = _capture_tool_input(args, kwargs, tool) + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input) + ) + + try: + result = tool(*args, **kwargs) + + # Capture tool output + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result) + ) + + return result + except Exception as exc: + _capture_exception(exc) + raise + + return sync_wrapped + + +def wrapped_config_with_tools(config): + # type: (GenerateContentConfig) -> GenerateContentConfig + """Wrap tools in config to emit execute_tool spans. 
Tools are sometimes passed directly as + callable functions as a part of the config object.""" + + if not config or not getattr(config, "tools", None): + return config + + result = copy.copy(config) + result.tools = [wrapped_tool(tool) for tool in config.tools] + + return result + + +def _extract_response_text(response): + # type: (GenerateContentResponse) -> Optional[List[str]] + """Extract text from response candidates.""" + + if not response or not getattr(response, "candidates", []): + return None + + texts = [] + for candidate in response.candidates: + if not hasattr(candidate, "content") or not hasattr(candidate.content, "parts"): + continue + + for part in candidate.content.parts: + if getattr(part, "text", None): + texts.append(part.text) + + return texts if texts else None + + +def extract_finish_reasons(response): + # type: (GenerateContentResponse) -> Optional[List[str]] + """Extract finish reasons from response candidates.""" + if not response or not getattr(response, "candidates", []): + return None + + finish_reasons = [] + for candidate in response.candidates: + if getattr(candidate, "finish_reason", None): + # Convert enum value to string if necessary + reason = str(candidate.finish_reason) + # Remove enum prefix if present (e.g., "FinishReason.STOP" -> "STOP") + if "." in reason: + reason = reason.split(".")[-1] + finish_reasons.append(reason) + + return finish_reasons if finish_reasons else None + + +def set_span_data_for_request(span, integration, model, contents, kwargs): + # type: (Span, Any, str, ContentListUnion, dict[str, Any]) -> None + """Set span data for the request.""" + span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + + if kwargs.get("stream", False): + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + config = kwargs.get("config") + + if config is None: + return + + config = cast(GenerateContentConfig, config) + + # Set input messages/prompts if PII is allowed + if should_send_default_pii() and integration.include_prompts: + messages = [] + + # Add system instruction if present + if hasattr(config, "system_instruction"): + system_instruction = config.system_instruction + if system_instruction: + system_text = extract_contents_text(system_instruction) + if system_text: + messages.append({"role": "system", "content": system_text}) + + # Add user message + contents_text = extract_contents_text(contents) + if contents_text: + messages.append({"role": "user", "content": contents_text}) + + if messages: + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + messages, + unpack=False, + ) + + # Extract parameters directly from config (not nested under generation_config) + for param, span_key in [ + ("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE), + ("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P), + ("top_k", SPANDATA.GEN_AI_REQUEST_TOP_K), + ("max_output_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS), + ("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY), + ("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY), + ("seed", SPANDATA.GEN_AI_REQUEST_SEED), + ]: + if hasattr(config, param): + value = getattr(config, param) + if value is not None: + span.set_data(span_key, value) + + # Set tools if available + if hasattr(config, "tools"): + tools = config.tools + if tools: + formatted_tools = _format_tools_for_span(tools) + if formatted_tools: + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + formatted_tools, + unpack=False, + ) + + +def 
set_span_data_for_response(span, integration, response): + # type: (Span, Any, GenerateContentResponse) -> None + """Set span data for the response.""" + if not response: + return + + if should_send_default_pii() and integration.include_prompts: + response_texts = _extract_response_text(response) + if response_texts: + # Format as JSON string array as per documentation + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_texts)) + + tool_calls = extract_tool_calls(response) + if tool_calls: + # Tool calls should be JSON serialized + span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)) + + finish_reasons = extract_finish_reasons(response) + if finish_reasons: + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons + ) + + if getattr(response, "response_id", None): + span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response.response_id) + + if getattr(response, "model_version", None): + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_version) + + usage_data = extract_usage_data(response) + + if usage_data["input_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage_data["input_tokens"]) + + if usage_data["input_tokens_cached"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + usage_data["input_tokens_cached"], + ) + + if usage_data["output_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage_data["output_tokens"]) + + if usage_data["output_tokens_reasoning"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + usage_data["output_tokens_reasoning"], + ) + + if usage_data["total_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage_data["total_tokens"]) + + +def prepare_generate_content_args(args, kwargs): + # type: (tuple[Any, ...], dict[str, Any]) -> tuple[Any, Any, str] + """Extract and prepare common arguments for generate_content methods.""" + model = args[0] if args else kwargs.get("model", "unknown") + contents = args[1] if len(args) > 1 else kwargs.get("contents") + model_name = get_model_name(model) + + config = kwargs.get("config") + wrapped_config = wrapped_config_with_tools(config) + if wrapped_config is not config: + kwargs["config"] = wrapped_config + + return model, contents, model_name diff --git a/sentry_sdk/integrations/httpx.py b/sentry_sdk/integrations/httpx.py index 2ddd44489f..2ada95aad0 100644 --- a/sentry_sdk/integrations/httpx.py +++ b/sentry_sdk/integrations/httpx.py @@ -1,8 +1,13 @@ import sentry_sdk +from sentry_sdk import start_span from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import Integration, DidNotEnable from sentry_sdk.tracing import BAGGAGE_HEADER_NAME -from sentry_sdk.tracing_utils import Baggage, should_propagate_trace +from sentry_sdk.tracing_utils import ( + Baggage, + should_propagate_trace, + add_http_request_source, +) from sentry_sdk.utils import ( SENSITIVE_DATA_SUBSTITUTE, capture_internal_exceptions, @@ -52,7 +57,7 @@ def send(self, request, **kwargs): with capture_internal_exceptions(): parsed_url = parse_url(str(request.url), sanitize=False) - with sentry_sdk.start_span( + with start_span( op=OP.HTTP_CLIENT, name="%s %s" % ( @@ -88,7 +93,10 @@ def send(self, request, **kwargs): span.set_http_status(rv.status_code) span.set_data("reason", rv.reason_phrase) - return rv + with capture_internal_exceptions(): + add_http_request_source(span) + + return rv Client.send = send @@ -106,7 +114,7 @@ async def send(self, request, **kwargs): with capture_internal_exceptions(): 
parsed_url = parse_url(str(request.url), sanitize=False) - with sentry_sdk.start_span( + with start_span( op=OP.HTTP_CLIENT, name="%s %s" % ( @@ -144,7 +152,10 @@ async def send(self, request, **kwargs): span.set_http_status(rv.status_code) span.set_data("reason", rv.reason_phrase) - return rv + with capture_internal_exceptions(): + add_http_request_source(span) + + return rv AsyncClient.send = send diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index fdba26569d..724d908665 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -4,7 +4,12 @@ import sentry_sdk from sentry_sdk.ai.monitoring import set_ai_pipeline_name -from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function +from sentry_sdk.ai.utils import ( + GEN_AI_ALLOWED_MESSAGE_ROLES, + normalize_message_roles, + set_data_normalized, + get_start_span_function, +) from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -209,8 +214,18 @@ def on_llm_start( _set_tools_on_span(span, all_params.get("tools")) if should_send_default_pii() and self.include_prompts: + normalized_messages = [ + { + "role": GEN_AI_ALLOWED_MESSAGE_ROLES.USER, + "content": {"type": "text", "text": prompt}, + } + for prompt in prompts + ] set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + normalized_messages, + unpack=False, ) def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): @@ -262,6 +277,8 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): normalized_messages.append( self._normalize_langchain_message(message) ) + normalized_messages = normalize_message_roles(normalized_messages) + set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_MESSAGES, @@ -740,8 +757,12 @@ def new_invoke(self, *args, **kwargs): and should_send_default_pii() and integration.include_prompts ): + normalized_messages = normalize_message_roles([input]) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + normalized_messages, + unpack=False, ) output = result.get("output") @@ -791,8 +812,12 @@ def new_stream(self, *args, **kwargs): and should_send_default_pii() and integration.include_prompts ): + normalized_messages = normalize_message_roles([input]) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + normalized_messages, + unpack=False, ) # Run the agent diff --git a/sentry_sdk/integrations/langgraph.py b/sentry_sdk/integrations/langgraph.py index df3941bb13..11aa1facf4 100644 --- a/sentry_sdk/integrations/langgraph.py +++ b/sentry_sdk/integrations/langgraph.py @@ -2,7 +2,7 @@ from typing import Any, Callable, List, Optional import sentry_sdk -from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -180,10 +180,11 @@ def new_invoke(self, *args, **kwargs): ): input_messages = _parse_langgraph_messages(args[0]) if input_messages: + normalized_input_messages = normalize_message_roles(input_messages) set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_MESSAGES, - input_messages, + 
normalized_input_messages, unpack=False, ) @@ -230,10 +231,11 @@ async def new_ainvoke(self, *args, **kwargs): ): input_messages = _parse_langgraph_messages(args[0]) if input_messages: + normalized_input_messages = normalize_message_roles(input_messages) set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_MESSAGES, - input_messages, + normalized_input_messages, unpack=False, ) diff --git a/sentry_sdk/integrations/litellm.py b/sentry_sdk/integrations/litellm.py new file mode 100644 index 0000000000..1f047b1c1d --- /dev/null +++ b/sentry_sdk/integrations/litellm.py @@ -0,0 +1,255 @@ +from typing import TYPE_CHECKING + +import sentry_sdk +from sentry_sdk import consts +from sentry_sdk.ai.monitoring import record_token_usage +from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized +from sentry_sdk.consts import SPANDATA +from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.utils import event_from_exception + +if TYPE_CHECKING: + from typing import Any, Dict + from datetime import datetime + +try: + import litellm # type: ignore[import-not-found] +except ImportError: + raise DidNotEnable("LiteLLM not installed") + + +def _get_metadata_dict(kwargs): + # type: (Dict[str, Any]) -> Dict[str, Any] + """Get the metadata dictionary from the kwargs.""" + litellm_params = kwargs.setdefault("litellm_params", {}) + + # we need this weird little dance, as metadata might be set but may be None initially + metadata = litellm_params.get("metadata") + if metadata is None: + metadata = {} + litellm_params["metadata"] = metadata + return metadata + + +def _input_callback(kwargs): + # type: (Dict[str, Any]) -> None + """Handle the start of a request.""" + integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration) + + if integration is None: + return + + # Get key parameters + full_model = kwargs.get("model", "") + try: + model, provider, _, _ = litellm.get_llm_provider(full_model) + except Exception: + model = full_model + provider = "unknown" + + call_type = kwargs.get("call_type", None) + if call_type == "embedding": + operation = "embeddings" + else: + operation = "chat" + + # Start a new span/transaction + span = get_start_span_function()( + op=( + consts.OP.GEN_AI_CHAT + if operation == "chat" + else consts.OP.GEN_AI_EMBEDDINGS + ), + name=f"{operation} {model}", + origin=LiteLLMIntegration.origin, + ) + span.__enter__() + + # Store span for later + _get_metadata_dict(kwargs)["_sentry_span"] = span + + # Set basic data + set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, provider) + set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation) + + # Record messages if allowed + messages = kwargs.get("messages", []) + if messages and should_send_default_pii() and integration.include_prompts: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + ) + + # Record other parameters + params = { + "model": SPANDATA.GEN_AI_REQUEST_MODEL, + "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING, + "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, + "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, + "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + } + for key, attribute in params.items(): + value = kwargs.get(key) + if value is not None: + set_data_normalized(span, attribute, value) + + # Record LiteLLM-specific parameters + litellm_params = { + 
"api_base": kwargs.get("api_base"), + "api_version": kwargs.get("api_version"), + "custom_llm_provider": kwargs.get("custom_llm_provider"), + } + for key, value in litellm_params.items(): + if value is not None: + set_data_normalized(span, f"gen_ai.litellm.{key}", value) + + +def _success_callback(kwargs, completion_response, start_time, end_time): + # type: (Dict[str, Any], Any, datetime, datetime) -> None + """Handle successful completion.""" + + span = _get_metadata_dict(kwargs).get("_sentry_span") + if span is None: + return + + integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration) + if integration is None: + return + + try: + # Record model information + if hasattr(completion_response, "model"): + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_MODEL, completion_response.model + ) + + # Record response content if allowed + if should_send_default_pii() and integration.include_prompts: + if hasattr(completion_response, "choices"): + response_messages = [] + for choice in completion_response.choices: + if hasattr(choice, "message"): + if hasattr(choice.message, "model_dump"): + response_messages.append(choice.message.model_dump()) + elif hasattr(choice.message, "dict"): + response_messages.append(choice.message.dict()) + else: + # Fallback for basic message objects + msg = {} + if hasattr(choice.message, "role"): + msg["role"] = choice.message.role + if hasattr(choice.message, "content"): + msg["content"] = choice.message.content + if hasattr(choice.message, "tool_calls"): + msg["tool_calls"] = choice.message.tool_calls + response_messages.append(msg) + + if response_messages: + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_messages + ) + + # Record token usage + if hasattr(completion_response, "usage"): + usage = completion_response.usage + record_token_usage( + span, + input_tokens=getattr(usage, "prompt_tokens", None), + output_tokens=getattr(usage, "completion_tokens", None), + total_tokens=getattr(usage, "total_tokens", None), + ) + + finally: + # Always finish the span and clean up + span.__exit__(None, None, None) + + +def _failure_callback(kwargs, exception, start_time, end_time): + # type: (Dict[str, Any], Exception, datetime, datetime) -> None + """Handle request failure.""" + span = _get_metadata_dict(kwargs).get("_sentry_span") + if span is None: + return + + try: + # Capture the exception + event, hint = event_from_exception( + exception, + client_options=sentry_sdk.get_client().options, + mechanism={"type": "litellm", "handled": False}, + ) + sentry_sdk.capture_event(event, hint=hint) + finally: + # Always finish the span and clean up + span.__exit__(type(exception), exception, None) + + +class LiteLLMIntegration(Integration): + """ + LiteLLM integration for Sentry. + + This integration automatically captures LiteLLM API calls and sends them to Sentry + for monitoring and error tracking. It supports all 100+ LLM providers that LiteLLM + supports, including OpenAI, Anthropic, Google, Cohere, and many others. 
+ + Features: + - Automatic exception capture for all LiteLLM calls + - Token usage tracking across all providers + - Provider detection and attribution + - Input/output message capture (configurable) + - Streaming response support + - Cost tracking integration + + Usage: + + ```python + import litellm + import sentry_sdk + from sentry_sdk.integrations.litellm import LiteLLMIntegration + + # Initialize Sentry with the LiteLLM integration + sentry_sdk.init( + dsn="your-dsn", + send_default_pii=True, + integrations=[ + LiteLLMIntegration( + include_prompts=True # Set to False to exclude message content + ) + ] + ) + + # All LiteLLM calls will now be monitored + response = litellm.completion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Hello!"}] + ) + ``` + + Configuration: + - include_prompts (bool): Whether to include prompts and responses in spans. + Defaults to True. Set to False to exclude potentially sensitive data. + """ + + identifier = "litellm" + origin = f"auto.ai.{identifier}" + + def __init__(self, include_prompts=True): + # type: (LiteLLMIntegration, bool) -> None + self.include_prompts = include_prompts + + @staticmethod + def setup_once(): + # type: () -> None + """Set up LiteLLM callbacks for monitoring.""" + litellm.input_callback = litellm.input_callback or [] + if _input_callback not in litellm.input_callback: + litellm.input_callback.append(_input_callback) + + litellm.success_callback = litellm.success_callback or [] + if _success_callback not in litellm.success_callback: + litellm.success_callback.append(_success_callback) + + litellm.failure_callback = litellm.failure_callback or [] + if _failure_callback not in litellm.failure_callback: + litellm.failure_callback.append(_failure_callback) diff --git a/sentry_sdk/integrations/litestar.py b/sentry_sdk/integrations/litestar.py index 745a00bcba..0cb9f4b972 100644 --- a/sentry_sdk/integrations/litestar.py +++ b/sentry_sdk/integrations/litestar.py @@ -1,4 +1,6 @@ from collections.abc import Set +from copy import deepcopy + import sentry_sdk from sentry_sdk.consts import OP from sentry_sdk.integrations import ( @@ -260,7 +262,7 @@ def event_processor(event, _): event.update( { - "request": request_info, + "request": deepcopy(request_info), "transaction": tx_name, "transaction_info": tx_info, } diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py index bfb30fc67b..7e16943b28 100644 --- a/sentry_sdk/integrations/logging.py +++ b/sentry_sdk/integrations/logging.py @@ -409,7 +409,7 @@ def _capture_log_from_record(self, client, record): attrs["logger.name"] = record.name # noinspection PyProtectedMember - client._capture_experimental_log( + client._capture_log( { "severity_text": otel_severity_text, "severity_number": otel_severity_number, diff --git a/sentry_sdk/integrations/loguru.py b/sentry_sdk/integrations/loguru.py index b910b9a407..2c0279d0ce 100644 --- a/sentry_sdk/integrations/loguru.py +++ b/sentry_sdk/integrations/loguru.py @@ -193,7 +193,7 @@ def loguru_sentry_logs_handler(message): if record.get("name"): attrs["logger.name"] = record["name"] - client._capture_experimental_log( + client._capture_log( { "severity_text": otel_severity_text, "severity_number": otel_severity_number, diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index e8b3b30ab2..19d7717b3c 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -1,9 +1,10 @@ from functools import wraps +from collections.abc import Iterable import sentry_sdk from sentry_sdk import consts
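A note for reviewers on the openai.py hunks: newer openai releases model omitted arguments as `NotGiven`/`Omit` instances instead of a single module-level sentinel, so the identity checks against `NOT_GIVEN` below are replaced with the isinstance-based `_is_given` helper added at the end of this file's diff. A condensed, runnable sketch of the intended filtering (the parameter dict is made up for illustration; requires the `openai` package):

```python
from openai import NOT_GIVEN  # an instance of NotGiven on openai >= 1.0

try:
    from openai import NotGiven
except ImportError:
    NotGiven = None  # very old clients: nothing to detect

try:
    from openai import Omit
except ImportError:
    Omit = None  # Omit only exists on recent clients


def _is_given(obj):
    # An argument counts as "given" unless it is a NotGiven/Omit sentinel.
    if NotGiven is not None and isinstance(obj, NotGiven):
        return False
    if Omit is not None and isinstance(obj, Omit):
        return False
    return True


params = {"temperature": 0.7, "max_tokens": NOT_GIVEN, "seed": None}
for key, value in params.items():
    # Mirrors the filter in _set_input_data: record only given, non-None values.
    if value is not None and _is_given(value):
        print(f"record {key}={value}")  # prints only the temperature entry
```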
from sentry_sdk.ai.monitoring import record_token_usage -from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -17,14 +18,19 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator + from typing import Any, List, Optional, Callable, AsyncIterator, Iterator from sentry_sdk.tracing import Span try: try: - from openai import NOT_GIVEN + from openai import NotGiven except ImportError: - NOT_GIVEN = None + NotGiven = None + + try: + from openai import Omit + except ImportError: + Omit = None from openai.resources.chat.completions import Completions, AsyncCompletions from openai.resources import Embeddings, AsyncEmbeddings @@ -182,8 +188,9 @@ def _set_input_data(span, kwargs, operation, integration): and should_send_default_pii() and integration.include_prompts ): + normalized_messages = normalize_message_roles(messages) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False ) # Input attributes: Common @@ -203,12 +210,12 @@ for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) - if value is not NOT_GIVEN and value is not None: + if value is not None and _is_given(value): set_data_normalized(span, attribute, value) # Input attributes: Tools tools = kwargs.get("tools") - if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: + if tools is not None and _is_given(tools) and len(tools) > 0: set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) @@ -688,3 +695,15 @@ async def _sentry_patched_responses_async(*args, **kwargs): return await _execute_async(f, *args, **kwargs) return _sentry_patched_responses_async + + +def _is_given(obj): + # type: (Any) -> bool + """ + Safely check whether a value was explicitly given, across different openai versions.
+ """ + if NotGiven is not None and isinstance(obj, NotGiven): + return False + if Omit is not None and isinstance(obj, Omit): + return False + return True diff --git a/sentry_sdk/integrations/openai_agents/spans/ai_client.py b/sentry_sdk/integrations/openai_agents/spans/ai_client.py index e215edfd26..88b403ba85 100644 --- a/sentry_sdk/integrations/openai_agents/spans/ai_client.py +++ b/sentry_sdk/integrations/openai_agents/spans/ai_client.py @@ -7,6 +7,7 @@ _set_input_data, _set_output_data, _set_usage_data, + _create_mcp_execute_tool_spans, ) from typing import TYPE_CHECKING @@ -38,3 +39,4 @@ def update_ai_client_span(span, agent, get_response_kwargs, result): _set_usage_data(span, result.usage) _set_input_data(span, get_response_kwargs) _set_output_data(span, result) + _create_mcp_execute_tool_spans(span, result) diff --git a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py index cf06120625..2a9c5ebe66 100644 --- a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +++ b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py @@ -1,5 +1,9 @@ import sentry_sdk -from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized +from sentry_sdk.ai.utils import ( + get_start_span_function, + set_data_normalized, + normalize_message_roles, +) from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import safe_serialize @@ -56,8 +60,12 @@ def invoke_agent_span(context, agent, kwargs): ) if len(messages) > 0: + normalized_messages = normalize_message_roles(messages) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + normalized_messages, + unpack=False, ) _set_agent_data(span, agent) diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index 73d2858e7f..125ff1175b 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -1,6 +1,11 @@ import sentry_sdk -from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import SPANDATA +from sentry_sdk.ai.utils import ( + GEN_AI_ALLOWED_MESSAGE_ROLES, + normalize_message_roles, + set_data_normalized, + normalize_message_role, +) +from sentry_sdk.consts import SPANDATA, SPANSTATUS, OP from sentry_sdk.integrations import DidNotEnable from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing_utils import set_span_errored @@ -94,35 +99,47 @@ def _set_input_data(span, get_response_kwargs): # type: (sentry_sdk.tracing.Span, dict[str, Any]) -> None if not should_send_default_pii(): return + request_messages = [] - messages_by_role = { - "system": [], - "user": [], - "assistant": [], - "tool": [], - } # type: (dict[str, list[Any]]) system_instructions = get_response_kwargs.get("system_instructions") if system_instructions: - messages_by_role["system"].append({"type": "text", "text": system_instructions}) + request_messages.append( + { + "role": GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM, + "content": [{"type": "text", "text": system_instructions}], + } + ) for message in get_response_kwargs.get("input", []): if "role" in message: - messages_by_role[message.get("role")].append( - {"type": "text", "text": message.get("content")} + normalized_role = normalize_message_role(message.get("role")) + request_messages.append( + { + "role": normalized_role, + "content": [{"type": "text", "text": 
message.get("content")}], + } ) else: if message.get("type") == "function_call": - messages_by_role["assistant"].append(message) + request_messages.append( + { + "role": GEN_AI_ALLOWED_MESSAGE_ROLES.ASSISTANT, + "content": [message], + } + ) elif message.get("type") == "function_call_output": - messages_by_role["tool"].append(message) - - request_messages = [] - for role, messages in messages_by_role.items(): - if len(messages) > 0: - request_messages.append({"role": role, "content": messages}) + request_messages.append( + { + "role": GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL, + "content": [message], + } + ) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + normalize_message_roles(request_messages), + unpack=False, ) @@ -156,3 +173,27 @@ def _set_output_data(span, result): set_data_normalized( span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) + + +def _create_mcp_execute_tool_spans(span, result): + # type: (sentry_sdk.tracing.Span, agents.Result) -> None + for output in result.output: + if output.__class__.__name__ == "McpCall": + with sentry_sdk.start_span( + op=OP.GEN_AI_EXECUTE_TOOL, + description=f"execute_tool {output.name}", + start_timestamp=span.start_timestamp, + ) as execute_tool_span: + set_data_normalized(execute_tool_span, SPANDATA.GEN_AI_TOOL_TYPE, "mcp") + set_data_normalized( + execute_tool_span, SPANDATA.GEN_AI_TOOL_NAME, output.name + ) + if should_send_default_pii(): + execute_tool_span.set_data( + SPANDATA.GEN_AI_TOOL_INPUT, output.arguments + ) + execute_tool_span.set_data( + SPANDATA.GEN_AI_TOOL_OUTPUT, output.output + ) + if output.error: + execute_tool_span.set_status(SPANSTATUS.ERROR) diff --git a/sentry_sdk/integrations/ray.py b/sentry_sdk/integrations/ray.py index 8d6cdc1201..08e78b7585 100644 --- a/sentry_sdk/integrations/ray.py +++ b/sentry_sdk/integrations/ray.py @@ -1,4 +1,5 @@ import inspect +import functools import sys import sentry_sdk @@ -17,7 +18,6 @@ import ray # type: ignore[import-not-found] except ImportError: raise DidNotEnable("Ray not installed.") -import functools from typing import TYPE_CHECKING @@ -54,12 +54,13 @@ def new_remote(f=None, *args, **kwargs): def wrapper(user_f): # type: (Callable[..., Any]) -> Any - def new_func(*f_args, _tracing=None, **f_kwargs): + @functools.wraps(user_f) + def new_func(*f_args, _sentry_tracing=None, **f_kwargs): # type: (Any, Optional[dict[str, Any]], Any) -> Any _check_sentry_initialized() transaction = sentry_sdk.continue_trace( - _tracing or {}, + _sentry_tracing or {}, op=OP.QUEUE_TASK_RAY, name=qualname_from_function(user_f), origin=RayIntegration.origin, @@ -78,6 +79,19 @@ def new_func(*f_args, _tracing=None, **f_kwargs): return result + # Patching new_func signature to add the _sentry_tracing parameter to it + # Ray later inspects the signature and finds the unexpected parameter otherwise + signature = inspect.signature(new_func) + params = list(signature.parameters.values()) + params.append( + inspect.Parameter( + "_sentry_tracing", + kind=inspect.Parameter.KEYWORD_ONLY, + default=None, + ) + ) + new_func.__signature__ = signature.replace(parameters=params) # type: ignore[attr-defined] + if f: rv = old_remote(new_func) else: @@ -99,7 +113,9 @@ def _remote_method_with_header_propagation(*args, **kwargs): for k, v in sentry_sdk.get_current_scope().iter_trace_propagation_headers() } try: - result = old_remote_method(*args, **kwargs, _tracing=tracing) + result = old_remote_method( + *args, **kwargs, 
_sentry_tracing=tracing + ) span.set_status(SPANSTATUS.OK) except Exception: span.set_status(SPANSTATUS.INTERNAL_ERROR) diff --git a/sentry_sdk/integrations/redis/utils.py b/sentry_sdk/integrations/redis/utils.py index cf230f6648..7bb73f3372 100644 --- a/sentry_sdk/integrations/redis/utils.py +++ b/sentry_sdk/integrations/redis/utils.py @@ -20,12 +20,13 @@ def _get_safe_command(name, args): # type: (str, Sequence[Any]) -> str command_parts = [name] + name_low = name.lower() + send_default_pii = should_send_default_pii() + for i, arg in enumerate(args): if i > _MAX_NUM_ARGS: break - name_low = name.lower() - if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA: command_parts.append(SENSITIVE_DATA_SUBSTITUTE) continue @@ -33,9 +34,8 @@ def _get_safe_command(name, args): arg_is_the_key = i == 0 if arg_is_the_key: command_parts.append(repr(arg)) - else: - if should_send_default_pii(): + if send_default_pii: command_parts.append(repr(arg)) else: command_parts.append(SENSITIVE_DATA_SUBSTITUTE) diff --git a/sentry_sdk/integrations/starlite.py b/sentry_sdk/integrations/starlite.py index daab82d642..855b87ad60 100644 --- a/sentry_sdk/integrations/starlite.py +++ b/sentry_sdk/integrations/starlite.py @@ -1,3 +1,5 @@ +from copy import deepcopy + import sentry_sdk from sentry_sdk.consts import OP from sentry_sdk.integrations import DidNotEnable, Integration @@ -237,7 +239,7 @@ def event_processor(event, _): event.update( { - "request": request_info, + "request": deepcopy(request_info), "transaction": tx_name, "transaction_info": tx_info, } diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py index d388c5bca6..3db97e5685 100644 --- a/sentry_sdk/integrations/stdlib.py +++ b/sentry_sdk/integrations/stdlib.py @@ -8,7 +8,11 @@ from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import Integration from sentry_sdk.scope import add_global_event_processor -from sentry_sdk.tracing_utils import EnvironHeaders, should_propagate_trace +from sentry_sdk.tracing_utils import ( + EnvironHeaders, + should_propagate_trace, + add_http_request_source, +) from sentry_sdk.utils import ( SENSITIVE_DATA_SUBSTITUTE, capture_internal_exceptions, @@ -135,6 +139,9 @@ def getresponse(self, *args, **kwargs): finally: span.finish() + with capture_internal_exceptions(): + add_http_request_source(span) + return rv HTTPConnection.putrequest = putrequest # type: ignore[method-assign] diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py index c031c51f50..cfe54c829c 100644 --- a/sentry_sdk/integrations/threading.py +++ b/sentry_sdk/integrations/threading.py @@ -2,6 +2,7 @@ import warnings from functools import wraps from threading import Thread, current_thread +from concurrent.futures import ThreadPoolExecutor, Future import sentry_sdk from sentry_sdk.integrations import Integration @@ -24,6 +25,7 @@ from sentry_sdk._types import ExcInfo F = TypeVar("F", bound=Callable[..., Any]) + T = TypeVar("T", bound=Any) class ThreadingIntegration(Integration): @@ -59,6 +61,15 @@ def setup_once(): django_version = None channels_version = None + is_async_emulated_with_threads = ( + sys.version_info < (3, 9) + and channels_version is not None + and channels_version < "4.0.0" + and django_version is not None + and django_version >= (3, 0) + and django_version < (4, 0) + ) + @wraps(old_start) def sentry_start(self, *a, **kw): # type: (Thread, *Any, **Any) -> Any @@ -67,14 +78,7 @@ def sentry_start(self, *a, **kw): return old_start(self, *a, **kw) if 
integration.propagate_scope: - if ( - sys.version_info < (3, 9) - and channels_version is not None - and channels_version < "4.0.0" - and django_version is not None - and django_version >= (3, 0) - and django_version < (4, 0) - ): + if is_async_emulated_with_threads: warnings.warn( "There is a known issue with Django channels 2.x and 3.x when using Python 3.8 or older. " "(Async support is emulated using threads and some Sentry data may be leaked between those threads.) " @@ -109,6 +113,9 @@ def sentry_start(self, *a, **kw): return old_start(self, *a, **kw) Thread.start = sentry_start # type: ignore + ThreadPoolExecutor.submit = _wrap_threadpool_executor_submit( # type: ignore + ThreadPoolExecutor.submit, is_async_emulated_with_threads + ) def _wrap_run(isolation_scope_to_use, current_scope_to_use, old_run_func): @@ -134,6 +141,43 @@ def _run_old_run_func(): return run # type: ignore +def _wrap_threadpool_executor_submit(func, is_async_emulated_with_threads): + # type: (Callable[..., Future[T]], bool) -> Callable[..., Future[T]] + """ + Wrap submit call to propagate scopes on task submission. + """ + + @wraps(func) + def sentry_submit(self, fn, *args, **kwargs): + # type: (ThreadPoolExecutor, Callable[..., T], *Any, **Any) -> Future[T] + integration = sentry_sdk.get_client().get_integration(ThreadingIntegration) + if integration is None: + return func(self, fn, *args, **kwargs) + + if integration.propagate_scope and is_async_emulated_with_threads: + isolation_scope = sentry_sdk.get_isolation_scope() + current_scope = sentry_sdk.get_current_scope() + elif integration.propagate_scope: + isolation_scope = sentry_sdk.get_isolation_scope().fork() + current_scope = sentry_sdk.get_current_scope().fork() + else: + isolation_scope = None + current_scope = None + + def wrapped_fn(*args, **kwargs): + # type: (*Any, **Any) -> Any + if isolation_scope is not None and current_scope is not None: + with use_isolation_scope(isolation_scope): + with use_scope(current_scope): + return fn(*args, **kwargs) + + return fn(*args, **kwargs) + + return func(self, wrapped_fn, *args, **kwargs) + + return sentry_submit + + def _capture_exception(): # type: () -> ExcInfo exc_info = sys.exc_info() diff --git a/sentry_sdk/logger.py b/sentry_sdk/logger.py index bc98f35155..0ea7218e01 100644 --- a/sentry_sdk/logger.py +++ b/sentry_sdk/logger.py @@ -46,7 +46,7 @@ def _capture_log(severity_text, severity_number, template, **kwargs): } # noinspection PyProtectedMember - client._capture_experimental_log( + client._capture_log( { "severity_text": severity_text, "severity_number": severity_number, diff --git a/sentry_sdk/metrics.py b/sentry_sdk/metrics.py deleted file mode 100644 index d0041114ce..0000000000 --- a/sentry_sdk/metrics.py +++ /dev/null @@ -1,971 +0,0 @@ -import io -import os -import random -import re -import sys -import threading -import time -import warnings -import zlib -from abc import ABC, abstractmethod -from contextlib import contextmanager -from datetime import datetime, timezone -from functools import wraps, partial - -import sentry_sdk -from sentry_sdk.utils import ( - ContextVar, - now, - nanosecond_time, - to_timestamp, - serialize_frame, - json_dumps, -) -from sentry_sdk.envelope import Envelope, Item -from sentry_sdk.tracing import TransactionSource - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from typing import Any - from typing import Callable - from typing import Dict - from typing import Generator - from typing import Iterable - from typing import List - from typing import Optional - 
from typing import Set - from typing import Tuple - from typing import Union - - from sentry_sdk._types import BucketKey - from sentry_sdk._types import DurationUnit - from sentry_sdk._types import FlushedMetricValue - from sentry_sdk._types import MeasurementUnit - from sentry_sdk._types import MetricMetaKey - from sentry_sdk._types import MetricTagValue - from sentry_sdk._types import MetricTags - from sentry_sdk._types import MetricTagsInternal - from sentry_sdk._types import MetricType - from sentry_sdk._types import MetricValue - - -warnings.warn( - "The sentry_sdk.metrics module is deprecated and will be removed in the next major release. " - "Sentry will reject all metrics sent after October 7, 2024. " - "Learn more: https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics", - DeprecationWarning, - stacklevel=2, -) - -_in_metrics = ContextVar("in_metrics", default=False) -_set = set # set is shadowed below - -GOOD_TRANSACTION_SOURCES = frozenset( - [ - TransactionSource.ROUTE, - TransactionSource.VIEW, - TransactionSource.COMPONENT, - TransactionSource.TASK, - ] -) - -_sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "") -_sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_") -_sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "") - - -def _sanitize_tag_value(value): - # type: (str) -> str - table = str.maketrans( - { - "\n": "\\n", - "\r": "\\r", - "\t": "\\t", - "\\": "\\\\", - "|": "\\u{7c}", - ",": "\\u{2c}", - } - ) - return value.translate(table) - - -def get_code_location(stacklevel): - # type: (int) -> Optional[Dict[str, Any]] - try: - frm = sys._getframe(stacklevel) - except Exception: - return None - - return serialize_frame( - frm, include_local_variables=False, include_source_context=True - ) - - -@contextmanager -def recursion_protection(): - # type: () -> Generator[bool, None, None] - """Enters recursion protection and returns the old flag.""" - old_in_metrics = _in_metrics.get() - _in_metrics.set(True) - try: - yield old_in_metrics - finally: - _in_metrics.set(old_in_metrics) - - -def metrics_noop(func): - # type: (Any) -> Any - """Convenient decorator that uses `recursion_protection` to - make a function a noop. - """ - - @wraps(func) - def new_func(*args, **kwargs): - # type: (*Any, **Any) -> Any - with recursion_protection() as in_metrics: - if not in_metrics: - return func(*args, **kwargs) - - return new_func - - -class Metric(ABC): - __slots__ = () - - @abstractmethod - def __init__(self, first): - # type: (MetricValue) -> None - pass - - @property - @abstractmethod - def weight(self): - # type: () -> int - pass - - @abstractmethod - def add(self, value): - # type: (MetricValue) -> None - pass - - @abstractmethod - def serialize_value(self): - # type: () -> Iterable[FlushedMetricValue] - pass - - -class CounterMetric(Metric): - __slots__ = ("value",) - - def __init__( - self, - first, # type: MetricValue - ): - # type: (...) -> None - self.value = float(first) - - @property - def weight(self): - # type: (...) -> int - return 1 - - def add( - self, - value, # type: MetricValue - ): - # type: (...) -> None - self.value += float(value) - - def serialize_value(self): - # type: (...) -> Iterable[FlushedMetricValue] - return (self.value,) - - -class GaugeMetric(Metric): - __slots__ = ( - "last", - "min", - "max", - "sum", - "count", - ) - - def __init__( - self, - first, # type: MetricValue - ): - # type: (...) 
-> None - first = float(first) - self.last = first - self.min = first - self.max = first - self.sum = first - self.count = 1 - - @property - def weight(self): - # type: (...) -> int - # Number of elements. - return 5 - - def add( - self, - value, # type: MetricValue - ): - # type: (...) -> None - value = float(value) - self.last = value - self.min = min(self.min, value) - self.max = max(self.max, value) - self.sum += value - self.count += 1 - - def serialize_value(self): - # type: (...) -> Iterable[FlushedMetricValue] - return ( - self.last, - self.min, - self.max, - self.sum, - self.count, - ) - - -class DistributionMetric(Metric): - __slots__ = ("value",) - - def __init__( - self, - first, # type: MetricValue - ): - # type(...) -> None - self.value = [float(first)] - - @property - def weight(self): - # type: (...) -> int - return len(self.value) - - def add( - self, - value, # type: MetricValue - ): - # type: (...) -> None - self.value.append(float(value)) - - def serialize_value(self): - # type: (...) -> Iterable[FlushedMetricValue] - return self.value - - -class SetMetric(Metric): - __slots__ = ("value",) - - def __init__( - self, - first, # type: MetricValue - ): - # type: (...) -> None - self.value = {first} - - @property - def weight(self): - # type: (...) -> int - return len(self.value) - - def add( - self, - value, # type: MetricValue - ): - # type: (...) -> None - self.value.add(value) - - def serialize_value(self): - # type: (...) -> Iterable[FlushedMetricValue] - def _hash(x): - # type: (MetricValue) -> int - if isinstance(x, str): - return zlib.crc32(x.encode("utf-8")) & 0xFFFFFFFF - return int(x) - - return (_hash(value) for value in self.value) - - -def _encode_metrics(flushable_buckets): - # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) -> bytes - out = io.BytesIO() - _write = out.write - - # Note on sanitization: we intentionally sanitize in emission (serialization) - # and not during aggregation for performance reasons. This means that the - # envelope can in fact have duplicate buckets stored. This is acceptable for - # relay side emission and should not happen commonly. 
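For readers skimming this deletion: the loop that follows in `_encode_metrics` wrote the statsd-flavored envelope payload. A small worked example of the line one flushed bucket produced, with illustrative values:

```python
# One flushed bucket: counter "button_click", unit "none", value 1.0,
# tag browser=firefox, bucket timestamp 1715000000.
name, unit, values, ty, tags, ts = (
    "button_click", "none", [1.0], "c", [("browser", "firefox")], 1715000000,
)
line = (
    f"{name}@{unit}"
    + "".join(f":{v}" for v in values)          # one ":<value>" per serialized value
    + f"|{ty}"                                  # metric type
    + ("|#" + ",".join(f"{k}:{v}" for k, v in tags) if tags else "")
    + f"|T{ts}\n"                               # bucket timestamp
)
assert line == "button_click@none:1.0|c|#browser:firefox|T1715000000\n"
```

Each bucket became one such line; as the comment above notes, sanitization happened at serialization time, so an envelope could contain duplicate buckets.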
- - for timestamp, buckets in flushable_buckets: - for bucket_key, metric in buckets.items(): - metric_type, metric_name, metric_unit, metric_tags = bucket_key - metric_name = _sanitize_metric_key(metric_name) - metric_unit = _sanitize_unit(metric_unit) - _write(metric_name.encode("utf-8")) - _write(b"@") - _write(metric_unit.encode("utf-8")) - - for serialized_value in metric.serialize_value(): - _write(b":") - _write(str(serialized_value).encode("utf-8")) - - _write(b"|") - _write(metric_type.encode("ascii")) - - if metric_tags: - _write(b"|#") - first = True - for tag_key, tag_value in metric_tags: - tag_key = _sanitize_tag_key(tag_key) - if not tag_key: - continue - if first: - first = False - else: - _write(b",") - _write(tag_key.encode("utf-8")) - _write(b":") - _write(_sanitize_tag_value(tag_value).encode("utf-8")) - - _write(b"|T") - _write(str(timestamp).encode("ascii")) - _write(b"\n") - - return out.getvalue() - - -def _encode_locations(timestamp, code_locations): - # type: (int, Iterable[Tuple[MetricMetaKey, Dict[str, Any]]]) -> bytes - mapping = {} # type: Dict[str, List[Any]] - - for key, loc in code_locations: - metric_type, name, unit = key - mri = "{}:{}@{}".format( - metric_type, _sanitize_metric_key(name), _sanitize_unit(unit) - ) - - loc["type"] = "location" - mapping.setdefault(mri, []).append(loc) - - return json_dumps({"timestamp": timestamp, "mapping": mapping}) - - -METRIC_TYPES = { - "c": CounterMetric, - "g": GaugeMetric, - "d": DistributionMetric, - "s": SetMetric, -} # type: dict[MetricType, type[Metric]] - -# some of these are dumb -TIMING_FUNCTIONS = { - "nanosecond": nanosecond_time, - "microsecond": lambda: nanosecond_time() / 1000.0, - "millisecond": lambda: nanosecond_time() / 1000000.0, - "second": now, - "minute": lambda: now() / 60.0, - "hour": lambda: now() / 3600.0, - "day": lambda: now() / 3600.0 / 24.0, - "week": lambda: now() / 3600.0 / 24.0 / 7.0, -} - - -class LocalAggregator: - __slots__ = ("_measurements",) - - def __init__(self): - # type: (...) -> None - self._measurements = {} # type: Dict[Tuple[str, MetricTagsInternal], Tuple[float, float, int, float]] - - def add( - self, - ty, # type: MetricType - key, # type: str - value, # type: float - unit, # type: MeasurementUnit - tags, # type: MetricTagsInternal - ): - # type: (...) -> None - export_key = "%s:%s@%s" % (ty, key, unit) - bucket_key = (export_key, tags) - - old = self._measurements.get(bucket_key) - if old is not None: - v_min, v_max, v_count, v_sum = old - v_min = min(v_min, value) - v_max = max(v_max, value) - v_count += 1 - v_sum += value - else: - v_min = v_max = v_sum = value - v_count = 1 - self._measurements[bucket_key] = (v_min, v_max, v_count, v_sum) - - def to_json(self): - # type: (...) -> Dict[str, Any] - rv = {} # type: Any - for (export_key, tags), ( - v_min, - v_max, - v_count, - v_sum, - ) in self._measurements.items(): - rv.setdefault(export_key, []).append( - { - "tags": _tags_to_dict(tags), - "min": v_min, - "max": v_max, - "count": v_count, - "sum": v_sum, - } - ) - return rv - - -class MetricsAggregator: - ROLLUP_IN_SECONDS = 10.0 - MAX_WEIGHT = 100000 - FLUSHER_SLEEP_TIME = 5.0 - - def __init__( - self, - capture_func, # type: Callable[[Envelope], None] - enable_code_locations=False, # type: bool - ): - # type: (...) 
-> None - self.buckets = {} # type: Dict[int, Any] - self._enable_code_locations = enable_code_locations - self._seen_locations = _set() # type: Set[Tuple[int, MetricMetaKey]] - self._pending_locations = {} # type: Dict[int, List[Tuple[MetricMetaKey, Any]]] - self._buckets_total_weight = 0 - self._capture_func = capture_func - self._running = True - self._lock = threading.Lock() - - self._flush_event = threading.Event() # type: threading.Event - self._force_flush = False - - # The aggregator shifts its flushing by up to an entire rollup window to - # avoid multiple clients trampling on end of a 10 second window as all the - # buckets are anchored to multiples of ROLLUP seconds. We randomize this - # number once per aggregator boot to achieve some level of offsetting - # across a fleet of deployed SDKs. Relay itself will also apply independent - # jittering. - self._flush_shift = random.random() * self.ROLLUP_IN_SECONDS - - self._flusher = None # type: Optional[threading.Thread] - self._flusher_pid = None # type: Optional[int] - - def _ensure_thread(self): - # type: (...) -> bool - """For forking processes we might need to restart this thread. - This ensures that our process actually has that thread running. - """ - if not self._running: - return False - - pid = os.getpid() - if self._flusher_pid == pid: - return True - - with self._lock: - # Recheck to make sure another thread didn't get here and start the - # the flusher in the meantime - if self._flusher_pid == pid: - return True - - self._flusher_pid = pid - - self._flusher = threading.Thread(target=self._flush_loop) - self._flusher.daemon = True - - try: - self._flusher.start() - except RuntimeError: - # Unfortunately at this point the interpreter is in a state that no - # longer allows us to spawn a thread and we have to bail. - self._running = False - return False - - return True - - def _flush_loop(self): - # type: (...) -> None - _in_metrics.set(True) - while self._running or self._force_flush: - if self._running: - self._flush_event.wait(self.FLUSHER_SLEEP_TIME) - self._flush() - - def _flush(self): - # type: (...) -> None - self._emit(self._flushable_buckets(), self._flushable_locations()) - - def _flushable_buckets(self): - # type: (...) -> (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) - with self._lock: - force_flush = self._force_flush - cutoff = time.time() - self.ROLLUP_IN_SECONDS - self._flush_shift - flushable_buckets = () # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]] - weight_to_remove = 0 - - if force_flush: - flushable_buckets = self.buckets.items() - self.buckets = {} - self._buckets_total_weight = 0 - self._force_flush = False - else: - flushable_buckets = [] - for buckets_timestamp, buckets in self.buckets.items(): - # If the timestamp of the bucket is newer that the rollup we want to skip it. - if buckets_timestamp <= cutoff: - flushable_buckets.append((buckets_timestamp, buckets)) - - # We will clear the elements while holding the lock, in order to avoid requesting it downstream again. - for buckets_timestamp, buckets in flushable_buckets: - for metric in buckets.values(): - weight_to_remove += metric.weight - del self.buckets[buckets_timestamp] - - self._buckets_total_weight -= weight_to_remove - - return flushable_buckets - - def _flushable_locations(self): - # type: (...) 
-> Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]] - with self._lock: - locations = self._pending_locations - self._pending_locations = {} - return locations - - @metrics_noop - def add( - self, - ty, # type: MetricType - key, # type: str - value, # type: MetricValue - unit, # type: MeasurementUnit - tags, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - local_aggregator=None, # type: Optional[LocalAggregator] - stacklevel=0, # type: Optional[int] - ): - # type: (...) -> None - if not self._ensure_thread() or self._flusher is None: - return None - - if timestamp is None: - timestamp = time.time() - elif isinstance(timestamp, datetime): - timestamp = to_timestamp(timestamp) - - bucket_timestamp = int( - (timestamp // self.ROLLUP_IN_SECONDS) * self.ROLLUP_IN_SECONDS - ) - serialized_tags = _serialize_tags(tags) - bucket_key = ( - ty, - key, - unit, - serialized_tags, - ) - - with self._lock: - local_buckets = self.buckets.setdefault(bucket_timestamp, {}) - metric = local_buckets.get(bucket_key) - if metric is not None: - previous_weight = metric.weight - metric.add(value) - else: - metric = local_buckets[bucket_key] = METRIC_TYPES[ty](value) - previous_weight = 0 - - added = metric.weight - previous_weight - - if stacklevel is not None: - self.record_code_location(ty, key, unit, stacklevel + 2, timestamp) - - # Given the new weight we consider whether we want to force flush. - self._consider_force_flush() - - # For sets, we only record that a value has been added to the set but not which one. - # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets - if local_aggregator is not None: - local_value = float(added if ty == "s" else value) - local_aggregator.add(ty, key, local_value, unit, serialized_tags) - - def record_code_location( - self, - ty, # type: MetricType - key, # type: str - unit, # type: MeasurementUnit - stacklevel, # type: int - timestamp=None, # type: Optional[float] - ): - # type: (...) -> None - if not self._enable_code_locations: - return - if timestamp is None: - timestamp = time.time() - meta_key = (ty, key, unit) - start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace( - hour=0, minute=0, second=0, microsecond=0, tzinfo=None - ) - start_of_day = int(to_timestamp(start_of_day)) - - if (start_of_day, meta_key) not in self._seen_locations: - self._seen_locations.add((start_of_day, meta_key)) - loc = get_code_location(stacklevel + 3) - if loc is not None: - # Group metadata by day to make flushing more efficient. - # There needs to be one envelope item per timestamp. - self._pending_locations.setdefault(start_of_day, []).append( - (meta_key, loc) - ) - - @metrics_noop - def need_code_location( - self, - ty, # type: MetricType - key, # type: str - unit, # type: MeasurementUnit - timestamp, # type: float - ): - # type: (...) -> bool - if self._enable_code_locations: - return False - meta_key = (ty, key, unit) - start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace( - hour=0, minute=0, second=0, microsecond=0, tzinfo=None - ) - start_of_day = int(to_timestamp(start_of_day)) - return (start_of_day, meta_key) not in self._seen_locations - - def kill(self): - # type: (...) -> None - if self._flusher is None: - return - - self._running = False - self._flush_event.set() - self._flusher = None - - @metrics_noop - def flush(self): - # type: (...) -> None - self._force_flush = True - self._flush() - - def _consider_force_flush(self): - # type: (...) 
-> None - # It's important to acquire a lock around this method, since it will touch shared data structures. - total_weight = len(self.buckets) + self._buckets_total_weight - if total_weight >= self.MAX_WEIGHT: - self._force_flush = True - self._flush_event.set() - - def _emit( - self, - flushable_buckets, # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) - code_locations, # type: Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]] - ): - # type: (...) -> Optional[Envelope] - envelope = Envelope() - - if flushable_buckets: - encoded_metrics = _encode_metrics(flushable_buckets) - envelope.add_item(Item(payload=encoded_metrics, type="statsd")) - - for timestamp, locations in code_locations.items(): - encoded_locations = _encode_locations(timestamp, locations) - envelope.add_item(Item(payload=encoded_locations, type="metric_meta")) - - if envelope.items: - self._capture_func(envelope) - return envelope - return None - - -def _serialize_tags( - tags, # type: Optional[MetricTags] -): - # type: (...) -> MetricTagsInternal - if not tags: - return () - - rv = [] - for key, value in tags.items(): - # If the value is a collection, we want to flatten it. - if isinstance(value, (list, tuple)): - for inner_value in value: - if inner_value is not None: - rv.append((key, str(inner_value))) - elif value is not None: - rv.append((key, str(value))) - - # It's very important to sort the tags in order to obtain the - # same bucket key. - return tuple(sorted(rv)) - - -def _tags_to_dict(tags): - # type: (MetricTagsInternal) -> Dict[str, Any] - rv = {} # type: Dict[str, Any] - for tag_name, tag_value in tags: - old_value = rv.get(tag_name) - if old_value is not None: - if isinstance(old_value, list): - old_value.append(tag_value) - else: - rv[tag_name] = [old_value, tag_value] - else: - rv[tag_name] = tag_value - return rv - - -def _get_aggregator(): - # type: () -> Optional[MetricsAggregator] - client = sentry_sdk.get_client() - return ( - client.metrics_aggregator - if client.is_active() and client.metrics_aggregator is not None - else None - ) - - -def _get_aggregator_and_update_tags(key, value, unit, tags): - # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]] - client = sentry_sdk.get_client() - if not client.is_active() or client.metrics_aggregator is None: - return None, None, tags - - updated_tags = dict(tags or ()) # type: Dict[str, MetricTagValue] - updated_tags.setdefault("release", client.options["release"]) - updated_tags.setdefault("environment", client.options["environment"]) - - scope = sentry_sdk.get_current_scope() - local_aggregator = None - - # We go with the low-level API here to access transaction information as - # this one is the same between just errors and errors + performance - transaction_source = scope._transaction_info.get("source") - if transaction_source in GOOD_TRANSACTION_SOURCES: - transaction_name = scope._transaction - if transaction_name: - updated_tags.setdefault("transaction", transaction_name) - if scope._span is not None: - local_aggregator = scope._span._get_local_aggregator() - - experiments = client.options.get("_experiments", {}) - before_emit_callback = experiments.get("before_emit_metric") - if before_emit_callback is not None: - with recursion_protection() as in_metrics: - if not in_metrics: - if not before_emit_callback(key, value, unit, updated_tags): - return None, None, updated_tags - - return client.metrics_aggregator, local_aggregator, 
updated_tags - - -def increment( - key, # type: str - value=1.0, # type: float - unit="none", # type: MeasurementUnit - tags=None, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - stacklevel=0, # type: int -): - # type: (...) -> None - """Increments a counter.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - key, value, unit, tags - ) - if aggregator is not None: - aggregator.add( - "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel - ) - - -# alias as incr is relatively common in python -incr = increment - - -class _Timing: - def __init__( - self, - key, # type: str - tags, # type: Optional[MetricTags] - timestamp, # type: Optional[Union[float, datetime]] - value, # type: Optional[float] - unit, # type: DurationUnit - stacklevel, # type: int - ): - # type: (...) -> None - self.key = key - self.tags = tags - self.timestamp = timestamp - self.value = value - self.unit = unit - self.entered = None # type: Optional[float] - self._span = None # type: Optional[sentry_sdk.tracing.Span] - self.stacklevel = stacklevel - - def _validate_invocation(self, context): - # type: (str) -> None - if self.value is not None: - raise TypeError( - "cannot use timing as %s when a value is provided" % context - ) - - def __enter__(self): - # type: (...) -> _Timing - self.entered = TIMING_FUNCTIONS[self.unit]() - self._validate_invocation("context-manager") - self._span = sentry_sdk.start_span(op="metric.timing", name=self.key) - if self.tags: - for key, value in self.tags.items(): - if isinstance(value, (tuple, list)): - value = ",".join(sorted(map(str, value))) - self._span.set_tag(key, value) - self._span.__enter__() - - # report code locations here for better accuracy - aggregator = _get_aggregator() - if aggregator is not None: - aggregator.record_code_location("d", self.key, self.unit, self.stacklevel) - - return self - - def __exit__(self, exc_type, exc_value, tb): - # type: (Any, Any, Any) -> None - assert self._span, "did not enter" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - self.key, - self.value, - self.unit, - self.tags, - ) - if aggregator is not None: - elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered # type: ignore - aggregator.add( - "d", - self.key, - elapsed, - self.unit, - tags, - self.timestamp, - local_aggregator, - None, # code locations are reported in __enter__ - ) - - self._span.__exit__(exc_type, exc_value, tb) - self._span = None - - def __call__(self, f): - # type: (Any) -> Any - self._validate_invocation("decorator") - - @wraps(f) - def timed_func(*args, **kwargs): - # type: (*Any, **Any) -> Any - with timing( - key=self.key, - tags=self.tags, - timestamp=self.timestamp, - unit=self.unit, - stacklevel=self.stacklevel + 1, - ): - return f(*args, **kwargs) - - return timed_func - - -def timing( - key, # type: str - value=None, # type: Optional[float] - unit="second", # type: DurationUnit - tags=None, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - stacklevel=0, # type: int -): - # type: (...) -> _Timing - """Emits a distribution with the time it takes to run the given code block. 
- - This method supports three forms of invocation: - - - when a `value` is provided, it functions similar to `distribution` but with - - it can be used as a context manager - - it can be used as a decorator - """ - if value is not None: - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - key, value, unit, tags - ) - if aggregator is not None: - aggregator.add( - "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel - ) - return _Timing(key, tags, timestamp, value, unit, stacklevel) - - -def distribution( - key, # type: str - value, # type: float - unit="none", # type: MeasurementUnit - tags=None, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - stacklevel=0, # type: int -): - # type: (...) -> None - """Emits a distribution.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - key, value, unit, tags - ) - if aggregator is not None: - aggregator.add( - "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel - ) - - -def set( - key, # type: str - value, # type: Union[int, str] - unit="none", # type: MeasurementUnit - tags=None, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - stacklevel=0, # type: int -): - # type: (...) -> None - """Emits a set.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - key, value, unit, tags - ) - if aggregator is not None: - aggregator.add( - "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel - ) - - -def gauge( - key, # type: str - value, # type: float - unit="none", # type: MeasurementUnit - tags=None, # type: Optional[MetricTags] - timestamp=None, # type: Optional[Union[float, datetime]] - stacklevel=0, # type: int -): - # type: (...) 
-> None - """Emits a gauge.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - key, value, unit, tags - ) - if aggregator is not None: - aggregator.add( - "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel - ) diff --git a/sentry_sdk/scope.py b/sentry_sdk/scope.py index c871e6a467..f9caf7e1d6 100644 --- a/sentry_sdk/scope.py +++ b/sentry_sdk/scope.py @@ -1679,7 +1679,7 @@ def new_scope(): try: # restore original scope _current_scope.reset(token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) @@ -1717,7 +1717,7 @@ def use_scope(scope): try: # restore original scope _current_scope.reset(token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) @@ -1761,12 +1761,12 @@ def isolation_scope(): # restore original scopes try: _current_scope.reset(current_token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) try: _isolation_scope.reset(isolation_token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) @@ -1808,12 +1808,12 @@ def use_isolation_scope(isolation_scope): # restore original scopes try: _current_scope.reset(current_token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) try: _isolation_scope.reset(isolation_token) - except LookupError: + except (LookupError, ValueError): capture_internal_exception(sys.exc_info()) diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index 1697df1f22..0d652e490a 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -276,7 +276,6 @@ class Span: "hub", "_context_manager_state", "_containing_transaction", - "_local_aggregator", "scope", "origin", "name", @@ -345,7 +344,6 @@ def __init__( self.timestamp = None # type: Optional[datetime] self._span_recorder = None # type: Optional[_SpanRecorder] - self._local_aggregator = None # type: Optional[LocalAggregator] self.update_active_thread() self.set_profiler_id(get_profiler_id()) @@ -383,13 +381,6 @@ def span_id(self, value): # type: (str) -> None self._span_id = value - def _get_local_aggregator(self): - # type: (...) -> LocalAggregator - rv = self._local_aggregator - if rv is None: - rv = self._local_aggregator = LocalAggregator() - return rv - def __repr__(self): # type: () -> str return ( @@ -741,11 +732,6 @@ def to_json(self): if self.status: self._tags["status"] = self.status - if self._local_aggregator is not None: - metrics_summary = self._local_aggregator.to_json() - if metrics_summary: - rv["_metrics_summary"] = metrics_summary - if len(self._measurements) > 0: rv["measurements"] = self._measurements @@ -1122,13 +1108,6 @@ def finish( event["measurements"] = self._measurements - # This is here since `to_json` is not invoked. This really should - # be gone when we switch to onlyspans. - if self._local_aggregator is not None: - metrics_summary = self._local_aggregator.to_json() - if metrics_summary: - event["_metrics_summary"] = metrics_summary - return scope.capture_event(event) def set_measurement(self, name, value, unit=""): @@ -1505,8 +1484,3 @@ def calculate_interest_rate(amount, rate, years): has_tracing_enabled, maybe_create_breadcrumbs_from_span, ) - -with warnings.catch_warnings(): - # The code in this file which uses `LocalAggregator` is only called from the deprecated `metrics` module. 
- warnings.simplefilter("ignore", DeprecationWarning) - from sentry_sdk.metrics import LocalAggregator diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index b81d647c6d..5c3f530e2e 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -218,33 +218,11 @@ def _should_be_included( ) -def add_query_source(span): - # type: (sentry_sdk.tracing.Span) -> None +def add_source(span, project_root, in_app_include, in_app_exclude): + # type: (sentry_sdk.tracing.Span, Optional[str], Optional[list[str]], Optional[list[str]]) -> None """ Adds OTel compatible source code information to the span """ - client = sentry_sdk.get_client() - if not client.is_active(): - return - - if span.timestamp is None or span.start_timestamp is None: - return - - should_add_query_source = client.options.get("enable_db_query_source", True) - if not should_add_query_source: - return - - duration = span.timestamp - span.start_timestamp - threshold = client.options.get("db_query_source_threshold_ms", 0) - slow_query = duration / timedelta(milliseconds=1) > threshold - - if not slow_query: - return - - project_root = client.options["project_root"] - in_app_include = client.options.get("in_app_include") - in_app_exclude = client.options.get("in_app_exclude") - # Find the correct frame frame = sys._getframe() # type: Union[FrameType, None] while frame is not None: @@ -309,6 +287,68 @@ def add_query_source(span): span.set_data(SPANDATA.CODE_FUNCTION, frame.f_code.co_name) +def add_query_source(span): + # type: (sentry_sdk.tracing.Span) -> None + """ + Adds OTel compatible source code information to a database query span + """ + client = sentry_sdk.get_client() + if not client.is_active(): + return + + if span.timestamp is None or span.start_timestamp is None: + return + + should_add_query_source = client.options.get("enable_db_query_source", True) + if not should_add_query_source: + return + + duration = span.timestamp - span.start_timestamp + threshold = client.options.get("db_query_source_threshold_ms", 0) + slow_query = duration / timedelta(milliseconds=1) > threshold + + if not slow_query: + return + + add_source( + span=span, + project_root=client.options["project_root"], + in_app_include=client.options.get("in_app_include"), + in_app_exclude=client.options.get("in_app_exclude"), + ) + + +def add_http_request_source(span): + # type: (sentry_sdk.tracing.Span) -> None + """ + Adds OTel compatible source code information to a span for an outgoing HTTP request + """ + client = sentry_sdk.get_client() + if not client.is_active(): + return + + if span.timestamp is None or span.start_timestamp is None: + return + + should_add_request_source = client.options.get("enable_http_request_source", False) + if not should_add_request_source: + return + + duration = span.timestamp - span.start_timestamp + threshold = client.options.get("http_request_source_threshold_ms", 0) + slow_query = duration / timedelta(milliseconds=1) > threshold + + if not slow_query: + return + + add_source( + span=span, + project_root=client.options["project_root"], + in_app_include=client.options.get("in_app_include"), + in_app_exclude=client.options.get("in_app_exclude"), + ) + + def extract_sentrytrace_data(header): # type: (Optional[str]) -> Optional[Dict[str, Union[str, bool, None]]] """ @@ -327,9 +367,9 @@ def extract_sentrytrace_data(header): trace_id, parent_span_id, sampled_str = match.groups() parent_sampled = None - if trace_id: + if trace_id and len(trace_id) != 32: trace_id = "{:032x}".format(int(trace_id, 
16)) - if parent_span_id: + if parent_span_id and len(parent_span_id) != 16: parent_span_id = "{:016x}".format(int(parent_span_id, 16)) if sampled_str: parent_sampled = sampled_str != "0" diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 75384519e9..645bfead19 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -171,17 +171,7 @@ def _parse_rate_limits(header, now=None): retry_after = now + timedelta(seconds=int(retry_after_val)) for category in categories and categories.split(";") or (None,): - if category == "metric_bucket": - try: - namespaces = parameters[4].split(";") - except IndexError: - namespaces = [] - - if not namespaces or "custom" in namespaces: - yield category, retry_after # type: ignore - - else: - yield category, retry_after # type: ignore + yield category, retry_after # type: ignore except (LookupError, ValueError): continue @@ -417,12 +407,6 @@ def _check_disabled(self, category): # type: (str) -> bool def _disabled(bucket): # type: (Any) -> bool - - # The envelope item type used for metrics is statsd - # whereas the rate limit category is metric_bucket - if bucket == "statsd": - bucket = "metric_bucket" - ts = self._disabled_until.get(bucket) return ts is not None and ts > datetime.now(timezone.utc) diff --git a/sentry_sdk/types.py b/sentry_sdk/types.py index 1a65247584..8b28166462 100644 --- a/sentry_sdk/types.py +++ b/sentry_sdk/types.py @@ -21,6 +21,7 @@ Log, MonitorConfig, SamplingContext, + Metric, ) else: from typing import Any @@ -35,6 +36,7 @@ Log = Any MonitorConfig = Any SamplingContext = Any + Metric = Any __all__ = ( @@ -46,4 +48,5 @@ "Log", "MonitorConfig", "SamplingContext", + "Metric", ) diff --git a/sentry_sdk/utils.py b/sentry_sdk/utils.py index 2083fd296c..cd825b29e2 100644 --- a/sentry_sdk/utils.py +++ b/sentry_sdk/utils.py @@ -59,7 +59,7 @@ from gevent.hub import Hub - from sentry_sdk._types import Event, ExcInfo, Log, Hint + from sentry_sdk._types import Event, ExcInfo, Log, Hint, Metric P = ParamSpec("P") R = TypeVar("R") @@ -2013,3 +2013,19 @@ def get_before_send_log(options): return options.get("before_send_log") or options["_experiments"].get( "before_send_log" ) + + +def has_metrics_enabled(options): + # type: (Optional[dict[str, Any]]) -> bool + if options is None: + return False + + return bool(options["_experiments"].get("enable_metrics", False)) + + +def get_before_send_metric(options): + # type: (Optional[dict[str, Any]]) -> Optional[Callable[[Metric, Hint], Optional[Metric]]] + if options is None: + return None + + return options["_experiments"].get("before_send_metric") diff --git a/setup.py b/setup.py index 7119e20e90..37c9cf54a6 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ def get_file_text(file_name): setup( name="sentry-sdk", - version="2.39.0", + version="2.42.0", author="Sentry Team and Contributors", author_email="hello@sentry.io", url="https://github.com/getsentry/sentry-python", @@ -65,6 +65,7 @@ def get_file_text(file_name): "langchain": ["langchain>=0.0.210"], "langgraph": ["langgraph>=0.6.6"], "launchdarkly": ["launchdarkly-server-sdk>=9.8.0"], + "litellm": ["litellm>=1.77.5"], "litestar": ["litestar>=2.0.0"], "loguru": ["loguru>=0.5"], "openai": ["openai>=1.0.0", "tiktoken>=0.3.0"], @@ -83,6 +84,7 @@ def get_file_text(file_name): "statsig": ["statsig>=0.55.3"], "tornado": ["tornado>=6"], "unleash": ["UnleashClient>=6.0.1"], + "google-genai": ["google-genai>=1.29.0"], }, entry_points={ "opentelemetry_propagator": [ diff --git a/tests/integrations/aiohttp/__init__.py 
b/tests/integrations/aiohttp/__init__.py index 0e1409fda0..a585c11e34 100644 --- a/tests/integrations/aiohttp/__init__.py +++ b/tests/integrations/aiohttp/__init__.py @@ -1,3 +1,9 @@ +import os +import sys import pytest pytest.importorskip("aiohttp") + +# Load `aiohttp_helpers` into the module search path to test request source path names relative to module. See +# `test_request_source_with_module_in_search_path` +sys.path.insert(0, os.path.join(os.path.dirname(__file__))) diff --git a/tests/integrations/aiohttp/aiohttp_helpers/__init__.py b/tests/integrations/aiohttp/aiohttp_helpers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integrations/aiohttp/aiohttp_helpers/helpers.py b/tests/integrations/aiohttp/aiohttp_helpers/helpers.py new file mode 100644 index 0000000000..86a6fa39e3 --- /dev/null +++ b/tests/integrations/aiohttp/aiohttp_helpers/helpers.py @@ -0,0 +1,2 @@ +async def get_request_with_client(client, url): + await client.get(url) diff --git a/tests/integrations/aiohttp/test_aiohttp.py b/tests/integrations/aiohttp/test_aiohttp.py index 267ce08fdd..811bf7efca 100644 --- a/tests/integrations/aiohttp/test_aiohttp.py +++ b/tests/integrations/aiohttp/test_aiohttp.py @@ -1,3 +1,5 @@ +import os +import datetime import asyncio import json @@ -18,7 +20,8 @@ ) from sentry_sdk import capture_message, start_transaction -from sentry_sdk.integrations.aiohttp import AioHttpIntegration +from sentry_sdk.integrations.aiohttp import AioHttpIntegration, create_trace_config +from sentry_sdk.consts import SPANDATA from tests.conftest import ApproxDict @@ -633,6 +636,353 @@ async def handler(request): ) +@pytest.mark.asyncio +@pytest.mark.parametrize("enable_http_request_source", [None, False]) +async def test_request_source_disabled( + sentry_init, + aiohttp_raw_server, + aiohttp_client, + capture_events, + enable_http_request_source, +): + sentry_options = { + "integrations": [AioHttpIntegration()], + "traces_sample_rate": 1.0, + "http_request_source_threshold_ms": 0, + } + + if enable_http_request_source is not None: + sentry_options["enable_http_request_source"] = enable_http_request_source + + sentry_init(**sentry_options) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + async def hello(request): + span_client = await aiohttp_client(raw_server) + await span_client.get("/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", hello) + + events = capture_events() + + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO not in data + assert SPANDATA.CODE_NAMESPACE not in data + assert SPANDATA.CODE_FILEPATH not in data + assert SPANDATA.CODE_FUNCTION not in data + + +@pytest.mark.asyncio +async def test_request_source_enabled( + sentry_init, + aiohttp_raw_server, + aiohttp_client, + capture_events, +): + sentry_options = { + "integrations": [AioHttpIntegration()], + "traces_sample_rate": 1.0, + "enable_http_request_source": True, + "http_request_source_threshold_ms": 0, + } + + sentry_init(**sentry_options) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + async def hello(request): + span_client = await aiohttp_client(raw_server) + await 
span_client.get("/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", hello) + + events = capture_events() + + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + +@pytest.mark.asyncio +async def test_request_source( + sentry_init, aiohttp_raw_server, aiohttp_client, capture_events +): + sentry_init( + integrations=[AioHttpIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + async def handler_with_outgoing_request(request): + span_client = await aiohttp_client(raw_server) + await span_client.get("/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", handler_with_outgoing_request) + + events = capture_events() + + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert ( + data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.aiohttp.test_aiohttp" + ) + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/aiohttp/test_aiohttp.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "handler_with_outgoing_request" + + +@pytest.mark.asyncio +async def test_request_source_with_module_in_search_path( + sentry_init, aiohttp_raw_server, aiohttp_client, capture_events +): + """ + Test that request source is relative to the path of the module it ran in + """ + sentry_init( + integrations=[AioHttpIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + from aiohttp_helpers.helpers import get_request_with_client + + async def handler_with_outgoing_request(request): + span_client = await aiohttp_client(raw_server) + await get_request_with_client(span_client, "/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", handler_with_outgoing_request) + + events = capture_events() + + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "aiohttp_helpers.helpers" + assert data.get(SPANDATA.CODE_FILEPATH) == 
"aiohttp_helpers/helpers.py" + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "get_request_with_client" + + +@pytest.mark.asyncio +async def test_no_request_source_if_duration_too_short( + sentry_init, aiohttp_raw_server, aiohttp_client, capture_events +): + sentry_init( + integrations=[AioHttpIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + async def handler_with_outgoing_request(request): + span_client = await aiohttp_client(raw_server) + await span_client.get("/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", handler_with_outgoing_request) + + events = capture_events() + + def fake_create_trace_context(*args, **kwargs): + trace_context = create_trace_config() + + async def overwrite_timestamps(session, trace_config_ctx, params): + span = trace_config_ctx.span + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=99999) + + trace_context.on_request_end.insert(0, overwrite_timestamps) + + return trace_context + + with mock.patch( + "sentry_sdk.integrations.aiohttp.create_trace_config", + fake_create_trace_context, + ): + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO not in data + assert SPANDATA.CODE_NAMESPACE not in data + assert SPANDATA.CODE_FILEPATH not in data + assert SPANDATA.CODE_FUNCTION not in data + + +@pytest.mark.asyncio +async def test_request_source_if_duration_over_threshold( + sentry_init, aiohttp_raw_server, aiohttp_client, capture_events +): + sentry_init( + integrations=[AioHttpIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + # server for making span request + async def handler(request): + return web.Response(text="OK") + + raw_server = await aiohttp_raw_server(handler) + + async def handler_with_outgoing_request(request): + span_client = await aiohttp_client(raw_server) + await span_client.get("/") + return web.Response(text="hello") + + app = web.Application() + app.router.add_get(r"/", handler_with_outgoing_request) + + events = capture_events() + + def fake_create_trace_context(*args, **kwargs): + trace_context = create_trace_config() + + async def overwrite_timestamps(session, trace_config_ctx, params): + span = trace_config_ctx.span + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=100001) + + trace_context.on_request_end.insert(0, overwrite_timestamps) + + return trace_context + + with mock.patch( + "sentry_sdk.integrations.aiohttp.create_trace_config", + fake_create_trace_context, + ): + client = await aiohttp_client(app) + await client.get("/") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert 
data.get(SPANDATA.CODE_LINENO) > 0 + assert ( + data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.aiohttp.test_aiohttp" + ) + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/aiohttp/test_aiohttp.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "handler_with_outgoing_request" + + @pytest.mark.asyncio async def test_span_origin( sentry_init, diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index 04ff12eb8b..e9065e2d32 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -1,5 +1,6 @@ import pytest from unittest import mock +import json try: from unittest.mock import AsyncMock @@ -878,3 +879,69 @@ def test_set_output_data_with_input_json_delta(sentry_init): assert span._data.get(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS) == 10 assert span._data.get(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS) == 20 assert span._data.get(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS) == 30 + + +def test_anthropic_message_role_mapping(sentry_init, capture_events): + """Test that Anthropic integration properly maps message roles like 'ai' to 'assistant'""" + sentry_init( + integrations=[AnthropicIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + client = Anthropic(api_key="z") + + def mock_messages_create(*args, **kwargs): + return Message( + id="msg_1", + content=[TextBlock(text="Hi there!", type="text")], + model="claude-3-opus", + role="assistant", + stop_reason="end_turn", + stop_sequence=None, + type="message", + usage=Usage(input_tokens=10, output_tokens=5), + ) + + client.messages._post = mock.Mock(return_value=mock_messages_create()) + + # Test messages with mixed roles including "ai" that should be mapped to "assistant" + test_messages = [ + {"role": "system", "content": "You are helpful."}, + {"role": "user", "content": "Hello"}, + {"role": "ai", "content": "Hi there!"}, # Should be mapped to "assistant" + {"role": "assistant", "content": "How can I help?"}, # Should stay "assistant" + ] + + with start_transaction(name="anthropic tx"): + client.messages.create( + model="claude-3-opus", max_tokens=10, messages=test_messages + ) + + (event,) = events + span = event["spans"][0] + + # Verify that the span was created correctly + assert span["op"] == "gen_ai.chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + + # Parse the stored messages + stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + + # Verify that "ai" role was mapped to "assistant" + assert len(stored_messages) == 4 + assert stored_messages[0]["role"] == "system" + assert stored_messages[1]["role"] == "user" + assert ( + stored_messages[2]["role"] == "assistant" + ) # "ai" should be mapped to "assistant" + assert stored_messages[3]["role"] == "assistant" # should stay "assistant" + + # Verify content is preserved + assert stored_messages[2]["content"] == "Hi there!" + assert stored_messages[3]["content"] == "How can I help?" 
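+    # Only aliased roles are rewritten; "system" and "user" pass through unchanged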
+ + # Verify no "ai" roles remain + roles = [msg["role"] for msg in stored_messages] + assert "ai" not in roles diff --git a/tests/integrations/dramatiq/test_dramatiq.py b/tests/integrations/dramatiq/test_dramatiq.py index d7917cbd00..53c36b640c 100644 --- a/tests/integrations/dramatiq/test_dramatiq.py +++ b/tests/integrations/dramatiq/test_dramatiq.py @@ -5,12 +5,21 @@ from dramatiq.brokers.stub import StubBroker import sentry_sdk +from sentry_sdk.tracing import TransactionSource +from sentry_sdk import start_transaction +from sentry_sdk.consts import SPANSTATUS from sentry_sdk.integrations.dramatiq import DramatiqIntegration +from sentry_sdk.integrations.logging import ignore_logger +ignore_logger("dramatiq.worker.WorkerThread") -@pytest.fixture -def broker(sentry_init): - sentry_init(integrations=[DramatiqIntegration()]) + +@pytest.fixture(scope="function") +def broker(request, sentry_init): + sentry_init( + integrations=[DramatiqIntegration()], + traces_sample_rate=getattr(request, "param", None), + ) broker = StubBroker() broker.emit_after("process_boot") dramatiq.set_broker(broker) @@ -44,19 +53,57 @@ def dummy_actor(x, y): assert exception["type"] == "ZeroDivisionError" -def test_that_actor_name_is_set_as_transaction(broker, worker, capture_events): +@pytest.mark.parametrize( + "broker,expected_span_status", + [ + (1.0, SPANSTATUS.INTERNAL_ERROR), + (1.0, SPANSTATUS.OK), + ], + ids=["error", "success"], + indirect=["broker"], +) +def test_task_transaction(broker, worker, capture_events, expected_span_status): events = capture_events() + task_fails = expected_span_status == SPANSTATUS.INTERNAL_ERROR @dramatiq.actor(max_retries=0) def dummy_actor(x, y): return x / y - dummy_actor.send(1, 0) + dummy_actor.send(1, int(not task_fails)) broker.join(dummy_actor.queue_name) worker.join() + if task_fails: + error_event = events.pop(0) + exception = error_event["exception"]["values"][0] + assert exception["type"] == "ZeroDivisionError" + assert exception["mechanism"]["type"] == DramatiqIntegration.identifier + (event,) = events + assert event["type"] == "transaction" assert event["transaction"] == "dummy_actor" + assert event["transaction_info"] == {"source": TransactionSource.TASK} + assert event["contexts"]["trace"]["status"] == expected_span_status + + +@pytest.mark.parametrize("broker", [1.0], indirect=True) +def test_dramatiq_propagate_trace(broker, worker, capture_events): + events = capture_events() + + @dramatiq.actor(max_retries=0) + def propagated_trace_task(): + pass + + with start_transaction() as outer_transaction: + propagated_trace_task.send() + broker.join(propagated_trace_task.queue_name) + worker.join() + + assert ( + events[0]["transaction"] == "propagated_trace_task" + ) # the "inner" transaction + assert events[0]["contexts"]["trace"]["trace_id"] == outer_transaction.trace_id def test_that_dramatiq_message_id_is_set_as_extra(broker, worker, capture_events): diff --git a/tests/integrations/google_genai/__init__.py b/tests/integrations/google_genai/__init__.py new file mode 100644 index 0000000000..5143bf4536 --- /dev/null +++ b/tests/integrations/google_genai/__init__.py @@ -0,0 +1,4 @@ +import pytest + +pytest.importorskip("google") +pytest.importorskip("google.genai") diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py new file mode 100644 index 0000000000..470be31944 --- /dev/null +++ b/tests/integrations/google_genai/test_google_genai.py @@ -0,0 +1,907 @@ +import json +import pytest +from unittest 
import mock + +from google import genai +from google.genai import types as genai_types + +from sentry_sdk import start_transaction +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration + + +@pytest.fixture +def mock_genai_client(): + """Fixture that creates a real genai.Client with mocked HTTP responses.""" + client = genai.Client(api_key="test-api-key") + return client + + +def create_mock_http_response(response_body): + """ + Create a mock HTTP response that the API client's request() method would return. + + Args: + response_body: The JSON body as a string or dict + + Returns: + An HttpResponse object with headers and body + """ + if isinstance(response_body, dict): + response_body = json.dumps(response_body) + + return genai_types.HttpResponse( + headers={ + "content-type": "application/json; charset=UTF-8", + }, + body=response_body, + ) + + +def create_mock_streaming_responses(response_chunks): + """ + Create a generator that yields mock HTTP responses for streaming. + + Args: + response_chunks: List of dicts, each representing a chunk's JSON body + + Returns: + A generator that yields HttpResponse objects + """ + for chunk in response_chunks: + yield create_mock_http_response(chunk) + + +# Sample API response JSON (based on real API format from user) +EXAMPLE_API_RESPONSE_JSON = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Hello! How can I help you today?"}], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 10, + "candidatesTokenCount": 20, + "totalTokenCount": 30, + "cachedContentTokenCount": 5, + "thoughtsTokenCount": 3, + }, + "modelVersion": "gemini-1.5-flash", + "responseId": "response-id-123", +} + + +def create_test_config( + temperature=None, + top_p=None, + top_k=None, + max_output_tokens=None, + presence_penalty=None, + frequency_penalty=None, + seed=None, + system_instruction=None, + tools=None, +): + """Create a GenerateContentConfig.""" + config_dict = {} + + if temperature is not None: + config_dict["temperature"] = temperature + if top_p is not None: + config_dict["top_p"] = top_p + if top_k is not None: + config_dict["top_k"] = top_k + if max_output_tokens is not None: + config_dict["max_output_tokens"] = max_output_tokens + if presence_penalty is not None: + config_dict["presence_penalty"] = presence_penalty + if frequency_penalty is not None: + config_dict["frequency_penalty"] = frequency_penalty + if seed is not None: + config_dict["seed"] = seed + if system_instruction is not None: + # Convert string to Content for system instruction + if isinstance(system_instruction, str): + system_instruction = genai_types.Content( + parts=[genai_types.Part(text=system_instruction)], role="system" + ) + config_dict["system_instruction"] = system_instruction + if tools is not None: + config_dict["tools"] = tools + + return genai_types.GenerateContentConfig(**config_dict) + + +@pytest.mark.parametrize( + "send_default_pii, include_prompts", + [ + (True, True), + (True, False), + (False, True), + (False, False), + ], +) +def test_nonstreaming_generate_content( + sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client +): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + # Mock the HTTP response at the _api_client.request() level + mock_http_response = 
create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, + "request", + return_value=mock_http_response, + ): + with start_transaction(name="google_genai"): + config = create_test_config(temperature=0.7, max_output_tokens=100) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Tell me a joke", config=config + ) + assert len(events) == 1 + (event,) = events + + assert event["type"] == "transaction" + assert event["transaction"] == "google_genai" + + # Should have 2 spans: invoke_agent and chat + assert len(event["spans"]) == 2 + invoke_span, chat_span = event["spans"] + + # Check invoke_agent span + assert invoke_span["op"] == OP.GEN_AI_INVOKE_AGENT + assert invoke_span["description"] == "invoke_agent" + assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash" + assert invoke_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + + # Check chat span + assert chat_span["op"] == OP.GEN_AI_CHAT + assert chat_span["description"] == "chat gemini-1.5-flash" + assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + + if send_default_pii and include_prompts: + # Messages are serialized as JSON strings + messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert messages == [{"role": "user", "content": "Tell me a joke"}] + + # Response text is stored as a JSON array + response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + # Parse the JSON array + response_texts = json.loads(response_text) + assert response_texts == ["Hello! 
How can I help you today?"] + else: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["data"] + + # Check token usage + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + # Output tokens now include reasoning tokens: candidates_token_count (20) + thoughts_token_count (3) = 23 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + + # Check configuration parameters + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + + +def test_generate_content_with_system_instruction( + sentry_init, capture_events, mock_genai_client +): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config( + system_instruction="You are a helpful assistant", + temperature=0.5, + ) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="What is 2+2?", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check that system instruction is included in messages + # (PII is enabled and include_prompts is True in this test) + messages_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + # Parse the JSON string to verify content + messages = json.loads(messages_str) + assert len(messages) == 2 + assert messages[0] == {"role": "system", "content": "You are a helpful assistant"} + assert messages[1] == {"role": "user", "content": "What is 2+2?"} + + +def test_generate_content_with_tools(sentry_init, capture_events, mock_genai_client): + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Create a mock tool function + def get_weather(location: str) -> str: + """Get the weather for a location""" + return f"The weather in {location} is sunny" + + # Create a tool with function declarations using real types + function_declaration = genai_types.FunctionDeclaration( + name="get_weather_tool", + description="Get weather information (tool object)", + parameters=genai_types.Schema( + type=genai_types.Type.OBJECT, + properties={ + "location": genai_types.Schema( + type=genai_types.Type.STRING, + description="The location to get weather for", + ) + }, + required=["location"], + ), + ) + + mock_tool = genai_types.Tool(function_declarations=[function_declaration]) + + # API response for tool usage + tool_response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "I'll check the weather."}], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 15, + "candidatesTokenCount": 10, + "totalTokenCount": 25, + }, + } + + mock_http_response = create_mock_http_response(tool_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = 
create_test_config(tools=[get_weather, mock_tool]) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="What's the weather?", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check that tools are recorded (data is serialized as a string) + tools_data_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + # Parse the JSON string to verify content + tools_data = json.loads(tools_data_str) + assert len(tools_data) == 2 + + # The order of tools may not be guaranteed, so sort by name and description for comparison + sorted_tools = sorted( + tools_data, key=lambda t: (t.get("name", ""), t.get("description", "")) + ) + + # The function tool + assert sorted_tools[0]["name"] == "get_weather" + assert sorted_tools[0]["description"] == "Get the weather for a location" + + # The FunctionDeclaration tool + assert sorted_tools[1]["name"] == "get_weather_tool" + assert sorted_tools[1]["description"] == "Get weather information (tool object)" + + +def test_tool_execution(sentry_init, capture_events): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Create a mock tool function + def get_weather(location: str) -> str: + """Get the weather for a location""" + return f"The weather in {location} is sunny" + + # Create wrapped version of the tool + from sentry_sdk.integrations.google_genai.utils import wrapped_tool + + wrapped_weather = wrapped_tool(get_weather) + + # Execute the wrapped tool + with start_transaction(name="test_tool"): + result = wrapped_weather("San Francisco") + + assert result == "The weather in San Francisco is sunny" + + (event,) = events + assert len(event["spans"]) == 1 + tool_span = event["spans"][0] + + assert tool_span["op"] == OP.GEN_AI_EXECUTE_TOOL + assert tool_span["description"] == "execute_tool get_weather" + assert tool_span["data"][SPANDATA.GEN_AI_TOOL_NAME] == "get_weather" + assert tool_span["data"][SPANDATA.GEN_AI_TOOL_TYPE] == "function" + assert ( + tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + == "Get the weather for a location" + ) + + +def test_error_handling(sentry_init, capture_events, mock_genai_client): + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Mock an error at the HTTP level + with mock.patch.object( + mock_genai_client._api_client, "request", side_effect=Exception("API Error") + ): + with start_transaction(name="google_genai"): + with pytest.raises(Exception, match="API Error"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", + contents="This will fail", + config=create_test_config(), + ) + + # Should have both transaction and error events + assert len(events) == 2 + error_event, transaction_event = events + + assert error_event["level"] == "error" + assert error_event["exception"]["values"][0]["type"] == "Exception" + assert error_event["exception"]["values"][0]["value"] == "API Error" + assert error_event["exception"]["values"][0]["mechanism"]["type"] == "google_genai" + + +def test_streaming_generate_content(sentry_init, capture_events, mock_genai_client): + """Test streaming with generate_content_stream, verifying chunk accumulation.""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Create streaming chunks - simulating a multi-chunk response 
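+    # Each chunk below stands in for one streamed event from the API; the
+    # integration is expected to accumulate text, finish reasons, and token
+    # counts across all three chunks onto the invoke_agent and chat spans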
+    # Chunk 1: First part of text with partial usage metadata
+    chunk1_json = {
+        "candidates": [
+            {
+                "content": {
+                    "role": "model",
+                    "parts": [{"text": "Hello! "}],
+                },
+                # No finishReason in intermediate chunks
+            }
+        ],
+        "usageMetadata": {
+            "promptTokenCount": 10,
+            "candidatesTokenCount": 2,
+            "totalTokenCount": 12,  # Running total for this chunk: 10 prompt + 2 candidates
+        },
+        "responseId": "response-id-stream-123",
+        "modelVersion": "gemini-1.5-flash",
+    }
+
+    # Chunk 2: Second part of text with more usage metadata
+    chunk2_json = {
+        "candidates": [
+            {
+                "content": {
+                    "role": "model",
+                    "parts": [{"text": "How can I "}],
+                },
+            }
+        ],
+        "usageMetadata": {
+            "promptTokenCount": 10,
+            "candidatesTokenCount": 3,
+            "totalTokenCount": 13,
+        },
+    }
+
+    # Chunk 3: Final part with finish reason and complete usage metadata
+    chunk3_json = {
+        "candidates": [
+            {
+                "content": {
+                    "role": "model",
+                    "parts": [{"text": "help you today?"}],
+                },
+                "finishReason": "STOP",
+            }
+        ],
+        "usageMetadata": {
+            "promptTokenCount": 10,
+            "candidatesTokenCount": 7,
+            "totalTokenCount": 25,
+            "cachedContentTokenCount": 5,
+            "thoughtsTokenCount": 3,
+        },
+    }
+
+    # Create streaming mock responses
+    stream_chunks = [chunk1_json, chunk2_json, chunk3_json]
+    mock_stream = create_mock_streaming_responses(stream_chunks)
+
+    with mock.patch.object(
+        mock_genai_client._api_client, "request_streamed", return_value=mock_stream
+    ):
+        with start_transaction(name="google_genai"):
+            config = create_test_config()
+            stream = mock_genai_client.models.generate_content_stream(
+                model="gemini-1.5-flash", contents="Stream me a response", config=config
+            )
+
+            # Consume the stream (this is what users do with the integration wrapper)
+            collected_chunks = list(stream)
+
+    # Verify we got all chunks
+    assert len(collected_chunks) == 3
+    assert collected_chunks[0].candidates[0].content.parts[0].text == "Hello! "
+    assert collected_chunks[1].candidates[0].content.parts[0].text == "How can I "
+    assert collected_chunks[2].candidates[0].content.parts[0].text == "help you today?"
+
+    (event,) = events
+
+    # There should be 2 spans: invoke_agent and chat
+    assert len(event["spans"]) == 2
+    invoke_span = event["spans"][0]
+    chat_span = event["spans"][1]
+
+    # Check that streaming flag is set on both spans
+    assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
+    assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
+
+    # Verify accumulated response text (all chunks combined)
+    expected_full_text = "Hello! How can I help you today?"
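+    # Concatenation of the three chunk texts: "Hello! " + "How can I " + "help you today?"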
+    # Response text is stored as a JSON string
+    chat_response_text = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT])
+    invoke_response_text = json.loads(
+        invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
+    )
+    assert chat_response_text == [expected_full_text]
+    assert invoke_response_text == [expected_full_text]
+
+    # Verify finish reasons (only the final chunk has a finish reason)
+    # When there's a single finish reason, it's stored as a plain string (not JSON)
+    assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["data"]
+    assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in invoke_span["data"]
+    assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP"
+    assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP"
+
+    # Verify token counts - should reflect accumulated values
+    # Input tokens: summed across chunks (10 + 10 + 10 = 30)
+    assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 30
+    assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 30
+
+    # Output tokens: candidates (2 + 3 + 7 = 12) + reasoning (3) = 15
+    # Note: output_tokens includes both candidates and reasoning tokens
+    assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 15
+    assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 15
+
+    # Total tokens: summed across chunks (12 + 13 + 25 = 50)
+    assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 50
+    assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 50
+
+    # Cached tokens: only reported in the final chunk = 5
+    assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5
+    assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5
+
+    # Reasoning tokens: sum of thoughts_token_count = 3
+    assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3
+    assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3
+
+    # Verify model name
+    assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash"
+    assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash"
+
+
+def test_span_origin(sentry_init, capture_events, mock_genai_client):
+    sentry_init(
+        integrations=[GoogleGenAIIntegration()],
+        traces_sample_rate=1.0,
+    )
+    events = capture_events()
+
+    mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON)
+
+    with mock.patch.object(
+        mock_genai_client._api_client, "request", return_value=mock_http_response
+    ):
+        with start_transaction(name="google_genai"):
+            config = create_test_config()
+            mock_genai_client.models.generate_content(
+                model="gemini-1.5-flash", contents="Test origin", config=config
+            )
+
+    (event,) = events
+
+    assert event["contexts"]["trace"]["origin"] == "manual"
+    for span in event["spans"]:
+        assert span["origin"] == "auto.ai.google_genai"
+
+
+def test_response_without_usage_metadata(
+    sentry_init, capture_events, mock_genai_client
+):
+    """Test handling of responses without usage metadata"""
+    sentry_init(
+        integrations=[GoogleGenAIIntegration()],
+        traces_sample_rate=1.0,
+    )
+    events = capture_events()
+
+    # Response without usage metadata
+    response_json = {
+        "candidates": [
+            {
+                "content": {
+                    "role": "model",
+                    "parts": [{"text": "No usage data"}],
+                },
+                "finishReason": "STOP",
+            }
+        ],
+    }
+
+    mock_http_response = create_mock_http_response(response_json)
+
+    with mock.patch.object(
+        mock_genai_client._api_client, "request", return_value=mock_http_response
+    ):
+        with start_transaction(name="google_genai"):
+            config = create_test_config()
+
mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=config + ) + + (event,) = events + chat_span = event["spans"][1] + + # Usage data should not be present + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["data"] + + +def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): + """Test handling of multiple response candidates""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Response with multiple candidates + multi_candidate_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Response 1"}], + }, + "finishReason": "STOP", + }, + { + "content": { + "role": "model", + "parts": [{"text": "Response 2"}], + }, + "finishReason": "MAX_TOKENS", + }, + ], + "usageMetadata": { + "promptTokenCount": 5, + "candidatesTokenCount": 15, + "totalTokenCount": 20, + }, + } + + mock_http_response = create_mock_http_response(multi_candidate_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config() + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Generate multiple", config=config + ) + + (event,) = events + chat_span = event["spans"][1] + + # Should capture all responses + # Response text is stored as a JSON string when there are multiple responses + response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + if isinstance(response_text, str) and response_text.startswith("["): + # It's a JSON array + response_list = json.loads(response_text) + assert response_list == ["Response 1", "Response 2"] + else: + # It's concatenated + assert response_text == "Response 1\nResponse 2" + + # Finish reasons are serialized as JSON + finish_reasons = json.loads( + chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] + ) + assert finish_reasons == ["STOP", "MAX_TOKENS"] + + +def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_client): + """Test that all configuration parameters are properly recorded""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config( + temperature=0.8, + top_p=0.95, + top_k=40, + max_output_tokens=2048, + presence_penalty=0.1, + frequency_penalty=0.2, + seed=12345, + ) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test all params", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check all parameters are recorded + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert 
invoke_span["data"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 + + +def test_empty_response(sentry_init, capture_events, mock_genai_client): + """Test handling of minimal response with no content""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Minimal response with empty candidates array + minimal_response_json = {"candidates": []} + mock_http_response = create_mock_http_response(minimal_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + response = mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=create_test_config() + ) + + # Response will have an empty candidates list + assert response is not None + assert len(response.candidates) == 0 + + (event,) = events + # Should still create spans even with empty candidates + assert len(event["spans"]) == 2 + + +def test_response_with_different_id_fields( + sentry_init, capture_events, mock_genai_client +): + """Test handling of different response ID field names""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Response with response_id and model_version + response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Test"}], + }, + "finishReason": "STOP", + } + ], + "responseId": "resp-456", + "modelVersion": "gemini-1.5-flash-001", + } + + mock_http_response = create_mock_http_response(response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=create_test_config() + ) + + (event,) = events + chat_span = event["spans"][1] + + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gemini-1.5-flash-001" + + +def test_tool_with_async_function(sentry_init, capture_events): + """Test that async tool functions are properly wrapped""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + capture_events() + + # Create an async tool function + async def async_tool(param: str) -> str: + """An async tool""" + return f"Async result: {param}" + + # Import is skipped in sync tests, but we can test the wrapping logic + from sentry_sdk.integrations.google_genai.utils import wrapped_tool + + # The wrapper should handle async functions + wrapped_async_tool = wrapped_tool(async_tool) + assert wrapped_async_tool != async_tool # Should be wrapped + assert hasattr(wrapped_async_tool, "__wrapped__") # Should preserve original + + +def test_contents_as_none(sentry_init, capture_events, mock_genai_client): + """Test handling when contents parameter is None""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents=None, config=create_test_config() + ) + + (event,) = events + invoke_span = 
event["spans"][0] + + # Should handle None contents gracefully + messages = invoke_span["data"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) + # Should only have system message if any, not user message + assert all(msg["role"] != "user" or msg["content"] is not None for msg in messages) + + +def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): + """Test extraction of tool/function calls from response""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Response with function calls + function_call_response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + {"text": "I'll help you with that."}, + { + "functionCall": { + "name": "get_weather", + "args": { + "location": "San Francisco", + "unit": "celsius", + }, + } + }, + { + "functionCall": { + "name": "get_time", + "args": {"timezone": "PST"}, + } + }, + ], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 20, + "candidatesTokenCount": 30, + "totalTokenCount": 50, + }, + } + + mock_http_response = create_mock_http_response(function_call_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", + contents="What's the weather and time?", + config=create_test_config(), + ) + + (event,) = events + chat_span = event["spans"][1] # The chat span + + # Check that tool calls are extracted and stored + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["data"] + + # Parse the JSON string to verify content + tool_calls = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS]) + + assert len(tool_calls) == 2 + + # First tool call + assert tool_calls[0]["name"] == "get_weather" + assert tool_calls[0]["type"] == "function_call" + # Arguments are serialized as JSON strings + assert json.loads(tool_calls[0]["arguments"]) == { + "location": "San Francisco", + "unit": "celsius", + } + + # Second tool call + assert tool_calls[1]["name"] == "get_time" + assert tool_calls[1]["type"] == "function_call" + # Arguments are serialized as JSON strings + assert json.loads(tool_calls[1]["arguments"]) == {"timezone": "PST"} diff --git a/tests/integrations/httpx/__init__.py b/tests/integrations/httpx/__init__.py index 1afd90ea3a..e524321b8b 100644 --- a/tests/integrations/httpx/__init__.py +++ b/tests/integrations/httpx/__init__.py @@ -1,3 +1,9 @@ +import os +import sys import pytest pytest.importorskip("httpx") + +# Load `httpx_helpers` into the module search path to test request source path names relative to module. 
See +# `test_request_source_with_module_in_search_path` +sys.path.insert(0, os.path.join(os.path.dirname(__file__))) diff --git a/tests/integrations/httpx/httpx_helpers/__init__.py b/tests/integrations/httpx/httpx_helpers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integrations/httpx/httpx_helpers/helpers.py b/tests/integrations/httpx/httpx_helpers/helpers.py new file mode 100644 index 0000000000..f1d4f3c98b --- /dev/null +++ b/tests/integrations/httpx/httpx_helpers/helpers.py @@ -0,0 +1,6 @@ +def get_request_with_client(client, url): + client.get(url) + + +async def async_get_request_with_client(client, url): + await client.get(url) diff --git a/tests/integrations/httpx/test_httpx.py b/tests/integrations/httpx/test_httpx.py index 4fd5275fb7..1f30fdf945 100644 --- a/tests/integrations/httpx/test_httpx.py +++ b/tests/integrations/httpx/test_httpx.py @@ -1,8 +1,11 @@ +import os +import datetime import asyncio from unittest import mock import httpx import pytest +from contextlib import contextmanager import sentry_sdk from sentry_sdk import capture_message, start_transaction @@ -393,6 +396,313 @@ def test_omit_url_data_if_parsing_fails(sentry_init, capture_events, httpx_mock) assert SPANDATA.HTTP_QUERY not in event["breadcrumbs"]["values"][0]["data"] +@pytest.mark.parametrize("enable_http_request_source", [None, False]) +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def test_request_source_disabled( + sentry_init, capture_events, enable_http_request_source, httpx_client, httpx_mock +): + httpx_mock.add_response() + sentry_options = { + "integrations": [HttpxIntegration()], + "traces_sample_rate": 1.0, + "http_request_source_threshold_ms": 0, + } + if enable_http_request_source is not None: + sentry_options["enable_http_request_source"] = enable_http_request_source + + sentry_init(**sentry_options) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + if asyncio.iscoroutinefunction(httpx_client.get): + asyncio.get_event_loop().run_until_complete(httpx_client.get(url)) + else: + httpx_client.get(url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO not in data + assert SPANDATA.CODE_NAMESPACE not in data + assert SPANDATA.CODE_FILEPATH not in data + assert SPANDATA.CODE_FUNCTION not in data + + +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def test_request_source_enabled(sentry_init, capture_events, httpx_client, httpx_mock): + httpx_mock.add_response() + sentry_options = { + "integrations": [HttpxIntegration()], + "traces_sample_rate": 1.0, + "enable_http_request_source": True, + "http_request_source_threshold_ms": 0, + } + sentry_init(**sentry_options) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + if asyncio.iscoroutinefunction(httpx_client.get): + asyncio.get_event_loop().run_until_complete(httpx_client.get(url)) + else: + httpx_client.get(url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def 
test_request_source(sentry_init, capture_events, httpx_client, httpx_mock): + httpx_mock.add_response() + + sentry_init( + integrations=[HttpxIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + if asyncio.iscoroutinefunction(httpx_client.get): + asyncio.get_event_loop().run_until_complete(httpx_client.get(url)) + else: + httpx_client.get(url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.httpx.test_httpx" + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/httpx/test_httpx.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "test_request_source" + + +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def test_request_source_with_module_in_search_path( + sentry_init, capture_events, httpx_client, httpx_mock +): + """ + Test that request source is relative to the path of the module it ran in + """ + httpx_mock.add_response() + sentry_init( + integrations=[HttpxIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + if asyncio.iscoroutinefunction(httpx_client.get): + from httpx_helpers.helpers import async_get_request_with_client + + asyncio.get_event_loop().run_until_complete( + async_get_request_with_client(httpx_client, url) + ) + else: + from httpx_helpers.helpers import get_request_with_client + + get_request_with_client(httpx_client, url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "httpx_helpers.helpers" + assert data.get(SPANDATA.CODE_FILEPATH) == "httpx_helpers/helpers.py" + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + if asyncio.iscoroutinefunction(httpx_client.get): + assert data.get(SPANDATA.CODE_FUNCTION) == "async_get_request_with_client" + else: + assert data.get(SPANDATA.CODE_FUNCTION) == "get_request_with_client" + + +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def test_no_request_source_if_duration_too_short( + sentry_init, capture_events, httpx_client, httpx_mock +): + httpx_mock.add_response() + + sentry_init( + integrations=[HttpxIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + + 
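+        # A sketch of the timing trick used below: the fake context manager
+        # closes the span immediately, then backdates its timestamps so the
+        # recorded duration is ~99.999 ms, just under the 100 ms
+        # http_request_source_threshold_ms configured above, so no source
+        # attributes should be attached.
+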
@contextmanager + def fake_start_span(*args, **kwargs): + with sentry_sdk.start_span(*args, **kwargs) as span: + pass + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=99999) + yield span + + with mock.patch( + "sentry_sdk.integrations.httpx.start_span", + fake_start_span, + ): + if asyncio.iscoroutinefunction(httpx_client.get): + asyncio.get_event_loop().run_until_complete(httpx_client.get(url)) + else: + httpx_client.get(url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO not in data + assert SPANDATA.CODE_NAMESPACE not in data + assert SPANDATA.CODE_FILEPATH not in data + assert SPANDATA.CODE_FUNCTION not in data + + +@pytest.mark.parametrize( + "httpx_client", + (httpx.Client(), httpx.AsyncClient()), +) +def test_request_source_if_duration_over_threshold( + sentry_init, capture_events, httpx_client, httpx_mock +): + httpx_mock.add_response() + + sentry_init( + integrations=[HttpxIntegration()], + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + events = capture_events() + + url = "http://example.com/" + + with start_transaction(name="test_transaction"): + + @contextmanager + def fake_start_span(*args, **kwargs): + with sentry_sdk.start_span(*args, **kwargs) as span: + pass + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=100001) + yield span + + with mock.patch( + "sentry_sdk.integrations.httpx.start_span", + fake_start_span, + ): + if asyncio.iscoroutinefunction(httpx_client.get): + asyncio.get_event_loop().run_until_complete(httpx_client.get(url)) + else: + httpx_client.get(url) + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.httpx.test_httpx" + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/httpx/test_httpx.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert ( + data.get(SPANDATA.CODE_FUNCTION) + == "test_request_source_if_duration_over_threshold" + ) + + @pytest.mark.parametrize( "httpx_client", (httpx.Client(), httpx.AsyncClient()), diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 5aa3928a67..b9ab4df5bf 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -1,6 +1,8 @@ from unittest import mock import pytest +import re import responses +import httpx from huggingface_hub import InferenceClient @@ -32,17 +34,49 @@ ) +def _add_mock_response( + httpx_mock, rsps, method, url, json=None, status=200, body=None, headers=None +): + # HF v1+ uses httpx for making requests to their API, while <1 uses requests. + # Since we have to test both, we need mocks for both httpx and requests. 
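+    # `httpx_mock` is the fixture from pytest-httpx, `rsps` the `responses`
+    # mock. pytest-httpx's `is_optional`/`is_reusable` flags keep a test from
+    # failing when a registered response goes unused or has to match more
+    # than one request.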
+ if HF_VERSION >= (1, 0, 0): + httpx_mock.add_response( + method=method, + url=url, + json=json, + content=body, + status_code=status, + headers=headers, + is_optional=True, + is_reusable=True, + ) + else: + rsps.add( + method=method, + url=url, + json=json, + body=body, + status=status, + headers=headers, + ) + + @pytest.fixture -def mock_hf_text_generation_api(): +def mock_hf_text_generation_api(httpx_mock): # type: () -> Any """Mock HuggingFace text generation API""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" - # Mock model info endpoint - rsps.add( - responses.GET, - MODEL_ENDPOINT.format(model_name=model_name), + _add_mock_response( + httpx_mock, + rsps, + "GET", + re.compile( + MODEL_ENDPOINT.format(model_name=model_name) + + r"(\?expand=inferenceProviderMapping)?" + ), json={ "id": model_name, "pipeline_tag": "text-generation", @@ -57,9 +91,10 @@ def mock_hf_text_generation_api(): status=200, ) - # Mock text generation endpoint - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name), json={ "generated_text": "[mocked] Hello! How can i help you?", @@ -73,61 +108,78 @@ def mock_hf_text_generation_api(): status=200, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_api_with_errors(): +def mock_hf_api_with_errors(httpx_mock): # type: () -> Any """Mock HuggingFace API that always raises errors for any request""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" # Mock model info endpoint with error - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={"error": "Model not found"}, status=404, ) # Mock text generation endpoint with error - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name), json={"error": "Internal server error", "message": "Something went wrong"}, status=500, ) # Mock chat completion endpoint with error - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", json={"error": "Internal server error", "message": "Something went wrong"}, status=500, ) # Catch-all pattern for any other model requests - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", "https://huggingface.co/api/models/test-model-error", json={"error": "Generic model error"}, status=500, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_text_generation_api_streaming(): +def mock_hf_text_generation_api_streaming(httpx_mock): # type: () -> Any """Mock streaming HuggingFace text generation API""" with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" # Mock model info endpoint - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={ "id": model_name, @@ -146,8 +198,10 @@ def mock_hf_text_generation_api_streaming(): # Mock text generation endpoint for streaming streaming_response = b'data:{"token":{"id":1, "special": false, "text": "the mocked "}}\n\ndata:{"token":{"id":2, "special": false, "text": "model response"}, "details":{"finish_reason": "length", "generated_tokens": 10, "seed": 0}}\n\n' - 
rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name), body=streaming_response, status=200, @@ -158,19 +212,24 @@ def mock_hf_text_generation_api_streaming(): }, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_chat_completion_api(): +def mock_hf_chat_completion_api(httpx_mock): # type: () -> Any """Mock HuggingFace chat completion API""" with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" # Mock model info endpoint - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={ "id": model_name, @@ -187,8 +246,10 @@ def mock_hf_chat_completion_api(): ) # Mock chat completion endpoint - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", json={ "id": "xyz-123", @@ -214,19 +275,24 @@ def mock_hf_chat_completion_api(): status=200, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_chat_completion_api_tools(): +def mock_hf_chat_completion_api_tools(httpx_mock): # type: () -> Any """Mock HuggingFace chat completion API with tool calls.""" with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" # Mock model info endpoint - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={ "id": model_name, @@ -243,8 +309,10 @@ def mock_hf_chat_completion_api_tools(): ) # Mock chat completion endpoint - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", json={ "id": "xyz-123", @@ -279,19 +347,24 @@ def mock_hf_chat_completion_api_tools(): status=200, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_chat_completion_api_streaming(): +def mock_hf_chat_completion_api_streaming(httpx_mock): # type: () -> Any """Mock streaming HuggingFace chat completion API""" with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: model_name = "test-model" # Mock model info endpoint - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={ "id": model_name, @@ -313,8 +386,10 @@ def mock_hf_chat_completion_api_streaming(): b'data:{"id":"xyz-124","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","content":"model response"},"index":0,"finish_reason":"stop"}],"usage":{"prompt_tokens":183,"completion_tokens":14,"total_tokens":197}}\n\n' ) - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", body=streaming_chat_response, status=200, @@ -325,19 +400,24 @@ def mock_hf_chat_completion_api_streaming(): }, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps @pytest.fixture -def mock_hf_chat_completion_api_streaming_tools(): +def mock_hf_chat_completion_api_streaming_tools(httpx_mock): # type: () -> Any """Mock streaming HuggingFace chat completion API with tool calls.""" with responses.RequestsMock(assert_all_requests_are_fired=False) 
as rsps: model_name = "test-model" # Mock model info endpoint - rsps.add( - responses.GET, + _add_mock_response( + httpx_mock, + rsps, + "GET", MODEL_ENDPOINT.format(model_name=model_name), json={ "id": model_name, @@ -359,8 +439,10 @@ def mock_hf_chat_completion_api_streaming_tools(): b'data:{"id":"xyz-124","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","tool_calls": [{"id": "call_123","type": "function","function": {"name": "get_weather", "arguments": {"location": "Paris"}}}]},"index":0,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":183,"completion_tokens":14,"total_tokens":197}}\n\n' ) - rsps.add( - responses.POST, + _add_mock_response( + httpx_mock, + rsps, + "POST", INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", body=streaming_chat_response, status=200, @@ -371,9 +453,13 @@ def mock_hf_chat_completion_api_streaming_tools(): }, ) - yield rsps + if HF_VERSION >= (1, 0, 0): + yield httpx_mock + else: + yield rsps +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation( @@ -401,7 +487,18 @@ def test_text_generation( ) (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.generate_text" assert span["description"] == "generate_text test-model" @@ -431,6 +528,7 @@ def test_text_generation( assert "gen_ai.response.model" not in span["data"] +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation_streaming( @@ -459,7 +557,18 @@ def test_text_generation_streaming( pass (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.generate_text" assert span["description"] == "generate_text test-model" @@ -489,6 +598,7 @@ def test_text_generation_streaming( assert "gen_ai.response.model" not in span["data"] +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion( @@ -515,7 +625,18 @@ def test_chat_completion( ) (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.chat" assert span["description"] == "chat 
test-model" @@ -549,6 +670,7 @@ def test_chat_completion( assert span["data"] == expected_data +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_streaming( @@ -577,7 +699,18 @@ def test_chat_completion_streaming( ) (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.chat" assert span["description"] == "chat test-model" @@ -611,6 +744,7 @@ def test_chat_completion_streaming( assert span["data"] == expected_data +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_chat_completion_api_error( sentry_init, capture_events, mock_hf_api_with_errors ): @@ -634,7 +768,17 @@ def test_chat_completion_api_error( assert error["exception"]["values"][0]["mechanism"]["type"] == "huggingface_hub" assert not error["exception"]["values"][0]["mechanism"]["handled"] - (span,) = transaction["spans"] + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.chat" assert span["description"] == "chat test-model" @@ -654,6 +798,7 @@ def test_chat_completion_api_error( assert span["data"] == expected_data +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_span_status_error(sentry_init, capture_events, mock_hf_api_with_errors): # type: (Any, Any, Any) -> None sentry_init(traces_sample_rate=1.0) @@ -669,10 +814,24 @@ def test_span_status_error(sentry_init, capture_events, mock_hf_api_with_errors) (error, transaction) = events assert error["level"] == "error" - assert transaction["spans"][0]["tags"]["status"] == "error" + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None + assert span["tags"]["status"] == "error" + assert transaction["contexts"]["trace"]["status"] == "error" +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_with_tools( @@ -715,7 +874,18 @@ def test_chat_completion_with_tools( ) (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.chat" assert span["description"] == "chat 
test-model" @@ -750,6 +920,7 @@ def test_chat_completion_with_tools( assert span["data"] == expected_data +@pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @pytest.mark.parametrize("send_default_pii", [True, False]) @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_streaming_with_tools( @@ -795,7 +966,18 @@ def test_chat_completion_streaming_with_tools( ) (transaction,) = events - (span,) = transaction["spans"] + + span = None + for sp in transaction["spans"]: + if sp["op"].startswith("gen_ai"): + assert span is None, "there is exactly one gen_ai span" + span = sp + else: + # there should be no other spans, just the gen_ai span + # and optionally some http.client spans from talking to the hf api + assert sp["op"] == "http.client" + + assert span is not None assert span["op"] == "gen_ai.chat" assert span["description"] == "chat test-model" diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index ba49b2e508..661208432f 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -817,3 +817,144 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25 assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10 assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15 + + +def test_langchain_message_role_mapping(sentry_init, capture_events): + """Test that message roles are properly normalized in langchain integration.""" + global llm_type + llm_type = "openai-chat" + + sentry_init( + integrations=[LangchainIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful assistant"), + ("human", "{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + + global stream_result_mock + stream_result_mock = Mock( + side_effect=[ + [ + ChatGenerationChunk( + type="ChatGenerationChunk", + message=AIMessageChunk(content="Test response"), + ), + ] + ] + ) + + llm = MockOpenAI( + model_name="gpt-3.5-turbo", + temperature=0, + openai_api_key="badkey", + ) + agent = create_openai_tools_agent(llm, [get_word_length], prompt) + agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True) + + # Test input that should trigger message role normalization + test_input = "Hello, how are you?" 
+ + with start_transaction(): + list(agent_executor.stream({"input": test_input})) + + assert len(events) > 0 + tx = events[0] + assert tx["type"] == "transaction" + + # Find spans with gen_ai operation that should have message data + gen_ai_spans = [ + span for span in tx.get("spans", []) if span.get("op", "").startswith("gen_ai") + ] + + # Check if any span has message data with normalized roles + message_data_found = False + for span in gen_ai_spans: + span_data = span.get("data", {}) + if SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data: + message_data_found = True + messages_data = span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] + + # Parse the message data (might be JSON string) + if isinstance(messages_data, str): + import json + + try: + messages = json.loads(messages_data) + except json.JSONDecodeError: + # If not valid JSON, skip this assertion + continue + else: + messages = messages_data + + # Verify that the input message is present and contains the test input + assert isinstance(messages, list) + assert len(messages) > 0 + + # The test input should be in one of the messages + input_found = False + for msg in messages: + if isinstance(msg, dict) and test_input in str(msg.get("content", "")): + input_found = True + break + elif isinstance(msg, str) and test_input in msg: + input_found = True + break + + assert input_found, ( + f"Test input '{test_input}' not found in messages: {messages}" + ) + break + + # The message role mapping functionality is primarily tested through the normalization + # that happens in the integration code. The fact that we can capture and process + # the messages without errors indicates the role mapping is working correctly. + assert message_data_found, "No span found with gen_ai request messages data" + + +def test_langchain_message_role_normalization_units(): + """Test the message role normalization functions directly.""" + from sentry_sdk.ai.utils import normalize_message_role, normalize_message_roles + + # Test individual role normalization + assert normalize_message_role("ai") == "assistant" + assert normalize_message_role("human") == "user" + assert normalize_message_role("tool_call") == "tool" + assert normalize_message_role("system") == "system" + assert normalize_message_role("user") == "user" + assert normalize_message_role("assistant") == "assistant" + assert normalize_message_role("tool") == "tool" + + # Test unknown role (should remain unchanged) + assert normalize_message_role("unknown_role") == "unknown_role" + + # Test message list normalization + test_messages = [ + {"role": "human", "content": "Hello"}, + {"role": "ai", "content": "Hi there!"}, + {"role": "tool_call", "content": "function_call"}, + {"role": "system", "content": "You are helpful"}, + {"content": "Message without role"}, + "string message", + ] + + normalized = normalize_message_roles(test_messages) + + # Verify the original messages are not modified + assert test_messages[0]["role"] == "human" # Original unchanged + assert test_messages[1]["role"] == "ai" # Original unchanged + + # Verify the normalized messages have correct roles + assert normalized[0]["role"] == "user" # human -> user + assert normalized[1]["role"] == "assistant" # ai -> assistant + assert normalized[2]["role"] == "tool" # tool_call -> tool + assert normalized[3]["role"] == "system" # system unchanged + assert "role" not in normalized[4] # Message without role unchanged + assert normalized[5] == "string message" # String message unchanged diff --git a/tests/integrations/langgraph/test_langgraph.py 
b/tests/integrations/langgraph/test_langgraph.py index 1510305b06..6ec6d9a96d 100644 --- a/tests/integrations/langgraph/test_langgraph.py +++ b/tests/integrations/langgraph/test_langgraph.py @@ -625,3 +625,74 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[0]["function"]["name"] == "search" assert tool_calls_data[1]["id"] == "call_multi_2" assert tool_calls_data[1]["function"]["name"] == "calculate" + + +def test_langgraph_message_role_mapping(sentry_init, capture_events): + """Test that Langgraph integration properly maps message roles like 'ai' to 'assistant'""" + sentry_init( + integrations=[LanggraphIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Mock a langgraph message with mixed roles + class MockMessage: + def __init__(self, content, message_type="human"): + self.content = content + self.type = message_type + + # Create mock state with messages having different roles + state_data = { + "messages": [ + MockMessage("System prompt", "system"), + MockMessage("Hello", "human"), + MockMessage("Hi there!", "ai"), # Should be mapped to "assistant" + MockMessage("How can I help?", "assistant"), # Should stay "assistant" + ] + } + + compiled_graph = MockCompiledGraph("test_graph") + pregel = MockPregelInstance(compiled_graph) + + with start_transaction(name="langgraph tx"): + # Use the wrapped invoke function directly + from sentry_sdk.integrations.langgraph import _wrap_pregel_invoke + + wrapped_invoke = _wrap_pregel_invoke( + lambda self, state_data: {"result": "success"} + ) + wrapped_invoke(pregel, state_data) + + (event,) = events + span = event["spans"][0] + + # Verify that the span was created correctly + assert span["op"] == "gen_ai.invoke_agent" + + # If messages were captured, verify role mapping + if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]: + import json + + stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + + # Find messages with specific content to verify role mapping + ai_message = next( + (msg for msg in stored_messages if msg.get("content") == "Hi there!"), None + ) + assistant_message = next( + (msg for msg in stored_messages if msg.get("content") == "How can I help?"), + None, + ) + + if ai_message: + # "ai" should have been mapped to "assistant" + assert ai_message["role"] == "assistant" + + if assistant_message: + # "assistant" should stay "assistant" + assert assistant_message["role"] == "assistant" + + # Verify no "ai" roles remain + roles = [msg["role"] for msg in stored_messages if "role" in msg] + assert "ai" not in roles diff --git a/tests/integrations/litellm/__init__.py b/tests/integrations/litellm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py new file mode 100644 index 0000000000..19ae206c85 --- /dev/null +++ b/tests/integrations/litellm/test_litellm.py @@ -0,0 +1,548 @@ +import pytest +from unittest import mock +from datetime import datetime + +try: + from unittest.mock import AsyncMock +except ImportError: + + class AsyncMock(mock.MagicMock): + async def __call__(self, *args, **kwargs): + return super(AsyncMock, self).__call__(*args, **kwargs) + + +try: + import litellm +except ImportError: + pytest.skip("litellm not installed", allow_module_level=True) + +from sentry_sdk import start_transaction +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.integrations.litellm import ( + LiteLLMIntegration, + 
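+    # The three callbacks below are module-level hooks that LiteLLMIntegration
+    # registers with litellm's input/success/failure callback lists (see
+    # test_integration_setup further down); the tests invoke them directly to
+    # simulate litellm calls without network access.
+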
_input_callback, + _success_callback, + _failure_callback, +) +from sentry_sdk.utils import package_version + + +LITELLM_VERSION = package_version("litellm") + + +# Mock response objects +class MockMessage: + def __init__(self, role="assistant", content="Test response"): + self.role = role + self.content = content + self.tool_calls = None + + def model_dump(self): + return {"role": self.role, "content": self.content} + + +class MockChoice: + def __init__(self, message=None): + self.message = message or MockMessage() + self.index = 0 + self.finish_reason = "stop" + + +class MockUsage: + def __init__(self, prompt_tokens=10, completion_tokens=20, total_tokens=30): + self.prompt_tokens = prompt_tokens + self.completion_tokens = completion_tokens + self.total_tokens = total_tokens + + +class MockCompletionResponse: + def __init__( + self, + model="gpt-3.5-turbo", + choices=None, + usage=None, + ): + self.id = "chatcmpl-test" + self.model = model + self.choices = choices or [MockChoice()] + self.usage = usage or MockUsage() + self.object = "chat.completion" + self.created = 1234567890 + + +class MockEmbeddingData: + def __init__(self, embedding=None): + self.embedding = embedding or [0.1, 0.2, 0.3] + self.index = 0 + self.object = "embedding" + + +class MockEmbeddingResponse: + def __init__(self, model="text-embedding-ada-002", data=None, usage=None): + self.model = model + self.data = data or [MockEmbeddingData()] + self.usage = usage or MockUsage( + prompt_tokens=5, completion_tokens=0, total_tokens=5 + ) + self.object = "list" + + +@pytest.mark.parametrize( + "send_default_pii, include_prompts", + [ + (True, True), + (True, False), + (False, True), + (False, False), + ], +) +def test_nonstreaming_chat_completion( + sentry_init, capture_events, send_default_pii, include_prompts +): + sentry_init( + integrations=[LiteLLMIntegration(include_prompts=include_prompts)], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + # Simulate what litellm does: call input callback, then success callback + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + assert len(events) == 1 + (event,) = events + + assert event["type"] == "transaction" + assert event["transaction"] == "litellm test" + + assert len(event["spans"]) == 1 + (span,) = event["spans"] + + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat gpt-3.5-turbo" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" + assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" + assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + if send_default_pii and include_prompts: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] + else: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + + +@pytest.mark.parametrize( + "send_default_pii, include_prompts", + [ + (True, True), + (True, False), + 
(False, True), + (False, False), + ], +) +def test_streaming_chat_completion( + sentry_init, capture_events, send_default_pii, include_prompts +): + sentry_init( + integrations=[LiteLLMIntegration(include_prompts=include_prompts)], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + "stream": True, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + assert len(events) == 1 + (event,) = events + + assert event["type"] == "transaction" + assert len(event["spans"]) == 1 + (span,) = event["spans"] + + assert span["op"] == OP.GEN_AI_CHAT + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + +def test_embeddings_create(sentry_init, capture_events): + sentry_init( + integrations=[LiteLLMIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Some text to test embeddings"}] + mock_response = MockEmbeddingResponse() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "text-embedding-ada-002", + "input": "Hello!", + "messages": messages, + "call_type": "embedding", + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + assert len(events) == 1 + (event,) = events + + assert event["type"] == "transaction" + assert len(event["spans"]) == 1 + (span,) = event["spans"] + + assert span["op"] == OP.GEN_AI_EMBEDDINGS + assert span["description"] == "embeddings text-embedding-ada-002" + assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + + +def test_exception_handling(sentry_init, capture_events): + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + _input_callback(kwargs) + _failure_callback( + kwargs, + Exception("API rate limit reached"), + datetime.now(), + datetime.now(), + ) + + # Should have error event and transaction + assert len(events) >= 1 + # Find the error event + error_events = [e for e in events if e.get("level") == "error"] + assert len(error_events) == 1 + + +def test_span_origin(sentry_init, capture_events): + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + + assert event["contexts"]["trace"]["origin"] == "manual" + assert event["spans"][0]["origin"] == "auto.ai.litellm" + + +def test_multiple_providers(sentry_init, capture_events): + """Test that the integration correctly identifies different providers.""" + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = 
[{"role": "user", "content": "Hello!"}] + + # Test with different model prefixes + test_cases = [ + ("gpt-3.5-turbo", "openai"), + ("claude-3-opus-20240229", "anthropic"), + ("gemini/gemini-pro", "gemini"), + ] + + for model, _ in test_cases: + mock_response = MockCompletionResponse(model=model) + with start_transaction(name=f"test {model}"): + kwargs = { + "model": model, + "messages": messages, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + assert len(events) == len(test_cases) + + for i in range(len(test_cases)): + span = events[i]["spans"][0] + # The provider should be detected by litellm.get_llm_provider + assert SPANDATA.GEN_AI_SYSTEM in span["data"] + + +def test_additional_parameters(sentry_init, capture_events): + """Test that additional parameters are captured.""" + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + "temperature": 0.7, + "max_tokens": 100, + "top_p": 0.9, + "frequency_penalty": 0.5, + "presence_penalty": 0.5, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + (span,) = event["spans"] + + assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 + assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 + + +def test_litellm_specific_parameters(sentry_init, capture_events): + """Test that LiteLLM-specific parameters are captured.""" + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + "api_base": "https://custom-api.example.com", + "api_version": "2023-01-01", + "custom_llm_provider": "custom_provider", + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + (span,) = event["spans"] + + assert span["data"]["gen_ai.litellm.api_base"] == "https://custom-api.example.com" + assert span["data"]["gen_ai.litellm.api_version"] == "2023-01-01" + assert span["data"]["gen_ai.litellm.custom_llm_provider"] == "custom_provider" + + +def test_no_integration(sentry_init, capture_events): + """Test that when integration is not enabled, callbacks don't break.""" + sentry_init( + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + mock_response = MockCompletionResponse() + + with start_transaction(name="litellm test"): + # When the integration isn't enabled, the callbacks should exit early + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + # These should not crash, just do nothing + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + # Should still have the transaction, but no child spans since integration is off + 
assert event["type"] == "transaction" + assert len(event.get("spans", [])) == 0 + + +def test_response_without_usage(sentry_init, capture_events): + """Test handling of responses without usage information.""" + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + + # Create a mock response without usage + mock_response = type( + "obj", + (object,), + { + "model": "gpt-3.5-turbo", + "choices": [MockChoice()], + }, + )() + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + (span,) = event["spans"] + + # Span should still be created even without usage info + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat gpt-3.5-turbo" + + +def test_integration_setup(sentry_init): + """Test that the integration sets up the callbacks correctly.""" + sentry_init( + integrations=[LiteLLMIntegration()], + traces_sample_rate=1.0, + ) + + # Check that callbacks are registered + assert _input_callback in (litellm.input_callback or []) + assert _success_callback in (litellm.success_callback or []) + assert _failure_callback in (litellm.failure_callback or []) + + +def test_message_dict_extraction(sentry_init, capture_events): + """Test that response messages are properly extracted with dict() fallback.""" + sentry_init( + integrations=[LiteLLMIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + messages = [{"role": "user", "content": "Hello!"}] + + # Create a message that has dict() method instead of model_dump() + class DictMessage: + def __init__(self): + self.role = "assistant" + self.content = "Response" + self.tool_calls = None + + def dict(self): + return {"role": self.role, "content": self.content} + + mock_response = MockCompletionResponse(choices=[MockChoice(message=DictMessage())]) + + with start_transaction(name="litellm test"): + kwargs = { + "model": "gpt-3.5-turbo", + "messages": messages, + } + + _input_callback(kwargs) + _success_callback( + kwargs, + mock_response, + datetime.now(), + datetime.now(), + ) + + (event,) = events + (span,) = event["spans"] + + # Should have extracted the response message + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index e7fbf8a7d8..276a1b4886 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -7,6 +7,11 @@ except ImportError: NOT_GIVEN = None +try: + from openai import omit +except ImportError: + omit = None + from openai import AsyncOpenAI, OpenAI, AsyncStream, Stream, OpenAIError from openai.types import CompletionUsage, CreateEmbeddingResponse, Embedding from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionChunk @@ -1424,7 +1429,7 @@ async def test_streaming_responses_api_async( ) @pytest.mark.parametrize( "tools", - [[], None, NOT_GIVEN], + [[], None, NOT_GIVEN, omit], ) def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): sentry_init( @@ -1447,3 +1452,56 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): span = event["spans"][0] assert "gen_ai.request.available_tools" not in span["data"] + + +def 
test_openai_message_role_mapping(sentry_init, capture_events): + """Test that OpenAI integration properly maps message roles like 'ai' to 'assistant'""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + client = OpenAI(api_key="z") + client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) + + # Test messages with mixed roles including "ai" that should be mapped to "assistant" + test_messages = [ + {"role": "system", "content": "You are helpful."}, + {"role": "user", "content": "Hello"}, + {"role": "ai", "content": "Hi there!"}, # Should be mapped to "assistant" + {"role": "assistant", "content": "How can I help?"}, # Should stay "assistant" + ] + + with start_transaction(name="openai tx"): + client.chat.completions.create(model="test-model", messages=test_messages) + + (event,) = events + span = event["spans"][0] + + # Verify that the span was created correctly + assert span["op"] == "gen_ai.chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + + # Parse the stored messages + import json + + stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + + # Verify that "ai" role was mapped to "assistant" + assert len(stored_messages) == 4 + assert stored_messages[0]["role"] == "system" + assert stored_messages[1]["role"] == "user" + assert ( + stored_messages[2]["role"] == "assistant" + ) # "ai" should be mapped to "assistant" + assert stored_messages[3]["role"] == "assistant" # should stay "assistant" + + # Verify content is preserved + assert stored_messages[2]["content"] == "Hi there!" + assert stored_messages[3]["content"] == "How can I help?" + + # Verify no "ai" roles remain + roles = [msg["role"] for msg in stored_messages] + assert "ai" not in roles diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index bd7f15faff..e647ce9fad 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -6,6 +6,7 @@ from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration from sentry_sdk.integrations.openai_agents.utils import safe_serialize +from sentry_sdk.utils import parse_version import agents from agents import ( @@ -15,10 +16,12 @@ ModelSettings, ) from agents.items import ( + McpCall, ResponseOutputMessage, ResponseOutputText, ResponseFunctionToolCall, ) +from agents.version import __version__ as OPENAI_AGENTS_VERSION from openai.types.responses.response_usage import ( InputTokensDetails, @@ -437,24 +440,28 @@ def simple_test_tool(message: str) -> str: ai_client_span2, ) = spans - available_tools = safe_serialize( - [ - { - "name": "simple_test_tool", - "description": "A simple tool", - "params_json_schema": { - "properties": {"message": {"title": "Message", "type": "string"}}, - "required": ["message"], - "title": "simple_test_tool_args", - "type": "object", - "additionalProperties": False, - }, - "on_invoke_tool": "._create_function_tool.._on_invoke_tool>", - "strict_json_schema": True, - "is_enabled": True, - } - ] - ) + available_tools = [ + { + "name": "simple_test_tool", + "description": "A simple tool", + "params_json_schema": { + "properties": {"message": {"title": "Message", "type": "string"}}, + "required": ["message"], + "title": "simple_test_tool_args", + "type": "object", + "additionalProperties": False, + }, + "on_invoke_tool": 
"._create_function_tool.._on_invoke_tool>", + "strict_json_schema": True, + "is_enabled": True, + } + ] + if parse_version(OPENAI_AGENTS_VERSION) >= (0, 3, 3): + available_tools[0].update( + {"tool_input_guardrails": None, "tool_output_guardrails": None} + ) + + available_tools = safe_serialize(available_tools) assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" @@ -683,6 +690,307 @@ async def test_span_status_error(sentry_init, capture_events, test_agent): assert transaction["contexts"]["trace"]["status"] == "error" +@pytest.mark.asyncio +async def test_mcp_tool_execution_spans(sentry_init, capture_events, test_agent): + """ + Test that MCP (Model Context Protocol) tool calls create execute_tool spans. + """ + + with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): + with patch( + "agents.models.openai_responses.OpenAIResponsesModel.get_response" + ) as mock_get_response: + # Create a McpCall object + mcp_call = McpCall( + id="mcp_call_123", + name="test_mcp_tool", + arguments='{"query": "search term"}', + output="MCP tool executed successfully", + error=None, + type="mcp_call", + server_label="test_server", + ) + + # Create a ModelResponse with an McpCall in the output + mcp_response = ModelResponse( + output=[mcp_call], + usage=Usage( + requests=1, + input_tokens=10, + output_tokens=5, + total_tokens=15, + ), + response_id="resp_mcp_123", + ) + + # Final response after MCP tool execution + final_response = ModelResponse( + output=[ + ResponseOutputMessage( + id="msg_final", + type="message", + status="completed", + content=[ + ResponseOutputText( + text="Task completed using MCP tool", + type="output_text", + annotations=[], + ) + ], + role="assistant", + ) + ], + usage=Usage( + requests=1, + input_tokens=15, + output_tokens=10, + total_tokens=25, + ), + response_id="resp_final_123", + ) + + mock_get_response.side_effect = [mcp_response, final_response] + + sentry_init( + integrations=[OpenAIAgentsIntegration()], + traces_sample_rate=1.0, + send_default_pii=True, + ) + + events = capture_events() + + await agents.Runner.run( + test_agent, + "Please use MCP tool", + run_config=test_run_config, + ) + + (transaction,) = events + spans = transaction["spans"] + + # Find the MCP execute_tool span + mcp_tool_span = None + for span in spans: + if ( + span.get("description") == "execute_tool test_mcp_tool" + and span.get("data", {}).get("gen_ai.tool.type") == "mcp" + ): + mcp_tool_span = span + break + + # Verify the MCP tool span was created + assert mcp_tool_span is not None, "MCP execute_tool span was not created" + assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["data"]["gen_ai.tool.type"] == "mcp" + assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" + assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "search term"}' + assert ( + mcp_tool_span["data"]["gen_ai.tool.output"] == "MCP tool executed successfully" + ) + + # Verify no error status since error was None + assert mcp_tool_span.get("tags", {}).get("status") != "error" + + +@pytest.mark.asyncio +async def test_mcp_tool_execution_with_error(sentry_init, capture_events, test_agent): + """ + Test that MCP tool calls with errors are tracked with error status. 
+ """ + + with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): + with patch( + "agents.models.openai_responses.OpenAIResponsesModel.get_response" + ) as mock_get_response: + # Create a McpCall object with an error + mcp_call_with_error = McpCall( + id="mcp_call_error_123", + name="failing_mcp_tool", + arguments='{"query": "test"}', + output=None, + error="MCP tool execution failed", + type="mcp_call", + server_label="test_server", + ) + + # Create a ModelResponse with a failing McpCall + mcp_response = ModelResponse( + output=[mcp_call_with_error], + usage=Usage( + requests=1, + input_tokens=10, + output_tokens=5, + total_tokens=15, + ), + response_id="resp_mcp_error_123", + ) + + # Final response after error + final_response = ModelResponse( + output=[ + ResponseOutputMessage( + id="msg_final", + type="message", + status="completed", + content=[ + ResponseOutputText( + text="The MCP tool encountered an error", + type="output_text", + annotations=[], + ) + ], + role="assistant", + ) + ], + usage=Usage( + requests=1, + input_tokens=15, + output_tokens=10, + total_tokens=25, + ), + response_id="resp_final_error_123", + ) + + mock_get_response.side_effect = [mcp_response, final_response] + + sentry_init( + integrations=[OpenAIAgentsIntegration()], + traces_sample_rate=1.0, + send_default_pii=True, + ) + + events = capture_events() + + await agents.Runner.run( + test_agent, + "Please use failing MCP tool", + run_config=test_run_config, + ) + + (transaction,) = events + spans = transaction["spans"] + + # Find the MCP execute_tool span with error + mcp_tool_span = None + for span in spans: + if ( + span.get("description") == "execute_tool failing_mcp_tool" + and span.get("data", {}).get("gen_ai.tool.type") == "mcp" + ): + mcp_tool_span = span + break + + # Verify the MCP tool span was created with error status + assert mcp_tool_span is not None, "MCP execute_tool span was not created" + assert mcp_tool_span["description"] == "execute_tool failing_mcp_tool" + assert mcp_tool_span["data"]["gen_ai.tool.type"] == "mcp" + assert mcp_tool_span["data"]["gen_ai.tool.name"] == "failing_mcp_tool" + assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "test"}' + assert mcp_tool_span["data"]["gen_ai.tool.output"] is None + + # Verify error status was set + assert mcp_tool_span["tags"]["status"] == "error" + + +@pytest.mark.asyncio +async def test_mcp_tool_execution_without_pii(sentry_init, capture_events, test_agent): + """ + Test that MCP tool input/output are not included when send_default_pii is False. 
+ """ + + with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): + with patch( + "agents.models.openai_responses.OpenAIResponsesModel.get_response" + ) as mock_get_response: + # Create a McpCall object + mcp_call = McpCall( + id="mcp_call_pii_123", + name="test_mcp_tool", + arguments='{"query": "sensitive data"}', + output="Result with sensitive info", + error=None, + type="mcp_call", + server_label="test_server", + ) + + # Create a ModelResponse with an McpCall + mcp_response = ModelResponse( + output=[mcp_call], + usage=Usage( + requests=1, + input_tokens=10, + output_tokens=5, + total_tokens=15, + ), + response_id="resp_mcp_123", + ) + + # Final response + final_response = ModelResponse( + output=[ + ResponseOutputMessage( + id="msg_final", + type="message", + status="completed", + content=[ + ResponseOutputText( + text="Task completed", + type="output_text", + annotations=[], + ) + ], + role="assistant", + ) + ], + usage=Usage( + requests=1, + input_tokens=15, + output_tokens=10, + total_tokens=25, + ), + response_id="resp_final_123", + ) + + mock_get_response.side_effect = [mcp_response, final_response] + + sentry_init( + integrations=[OpenAIAgentsIntegration()], + traces_sample_rate=1.0, + send_default_pii=False, # PII disabled + ) + + events = capture_events() + + await agents.Runner.run( + test_agent, + "Please use MCP tool", + run_config=test_run_config, + ) + + (transaction,) = events + spans = transaction["spans"] + + # Find the MCP execute_tool span + mcp_tool_span = None + for span in spans: + if ( + span.get("description") == "execute_tool test_mcp_tool" + and span.get("data", {}).get("gen_ai.tool.type") == "mcp" + ): + mcp_tool_span = span + break + + # Verify the MCP tool span was created but without input/output + assert mcp_tool_span is not None, "MCP execute_tool span was not created" + assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["data"]["gen_ai.tool.type"] == "mcp" + assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" + + # Verify input and output are not included when send_default_pii is False + assert "gen_ai.tool.input" not in mcp_tool_span["data"] + assert "gen_ai.tool.output" not in mcp_tool_span["data"] + + @pytest.mark.asyncio async def test_multiple_agents_asyncio( sentry_init, capture_events, test_agent, mock_model_response @@ -723,3 +1031,49 @@ async def run(): assert txn2["transaction"] == "test_agent workflow" assert txn3["type"] == "transaction" assert txn3["transaction"] == "test_agent workflow" + + +def test_openai_agents_message_role_mapping(sentry_init, capture_events): + """Test that OpenAI Agents integration properly maps message roles like 'ai' to 'assistant'""" + sentry_init( + integrations=[OpenAIAgentsIntegration()], + traces_sample_rate=1.0, + send_default_pii=True, + ) + + # Test input messages with mixed roles including "ai" + test_input = [ + {"role": "system", "content": "You are helpful."}, + {"role": "user", "content": "Hello"}, + {"role": "ai", "content": "Hi there!"}, # Should be mapped to "assistant" + {"role": "assistant", "content": "How can I help?"}, # Should stay "assistant" + ] + + get_response_kwargs = {"input": test_input} + + from sentry_sdk.integrations.openai_agents.utils import _set_input_data + from sentry_sdk import start_span + + with start_span(op="test") as span: + _set_input_data(span, get_response_kwargs) + + # Verify that messages were processed and roles were mapped + from sentry_sdk.consts import SPANDATA + + if SPANDATA.GEN_AI_REQUEST_MESSAGES in 
span._data:
+        import json
+
+        stored_messages = json.loads(span._data[SPANDATA.GEN_AI_REQUEST_MESSAGES])
+
+        # Verify roles were properly mapped
+        found_assistant_roles = 0
+        for message in stored_messages:
+            if message["role"] == "assistant":
+                found_assistant_roles += 1
+
+        # Should have 2 assistant roles (1 from original "assistant", 1 from mapped "ai")
+        assert found_assistant_roles == 2
+
+        # Verify no "ai" roles remain in any message
+        for message in stored_messages:
+            assert message["role"] != "ai"
diff --git a/tests/integrations/ray/test_ray.py b/tests/integrations/ray/test_ray.py
index f4e67df038..6aaced391e 100644
--- a/tests/integrations/ray/test_ray.py
+++ b/tests/integrations/ray/test_ray.py
@@ -100,6 +100,9 @@ def example_task():
     else:
         example_task = ray.remote(example_task)
 
+    # Function name shouldn't be overwritten by the Sentry wrapper
+    assert example_task._function_name == "tests.integrations.ray.test_ray.example_task"
+
     with sentry_sdk.start_transaction(op="task", name="ray test transaction"):
         worker_envelopes = ray.get(example_task.remote())
 
diff --git a/tests/integrations/stdlib/__init__.py b/tests/integrations/stdlib/__init__.py
new file mode 100644
index 0000000000..472e0151b2
--- /dev/null
+++ b/tests/integrations/stdlib/__init__.py
@@ -0,0 +1,6 @@
+import os
+import sys
+
+# Add this directory to the module search path so `httplib_helpers` can be
+# imported; used by `test_request_source_with_module_in_search_path`.
+sys.path.insert(0, os.path.dirname(__file__))
diff --git a/tests/integrations/stdlib/httplib_helpers/__init__.py b/tests/integrations/stdlib/httplib_helpers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/integrations/stdlib/httplib_helpers/helpers.py b/tests/integrations/stdlib/httplib_helpers/helpers.py
new file mode 100644
index 0000000000..875052e7b5
--- /dev/null
+++ b/tests/integrations/stdlib/httplib_helpers/helpers.py
@@ -0,0 +1,3 @@
+def get_request_with_connection(connection, url):
+    connection.request("GET", url)
+    connection.getresponse()
diff --git a/tests/integrations/stdlib/test_httplib.py b/tests/integrations/stdlib/test_httplib.py
index b8d46d0558..9bd53d6ad1 100644
--- a/tests/integrations/stdlib/test_httplib.py
+++ b/tests/integrations/stdlib/test_httplib.py
@@ -1,3 +1,5 @@
+import datetime
+import os
 from http.client import HTTPConnection, HTTPSConnection
 from socket import SocketIO
 from urllib.error import HTTPError
@@ -374,6 +376,234 @@ def test_option_trace_propagation_targets(
     assert "baggage" not in request_headers
 
 
+@pytest.mark.parametrize("enable_http_request_source", [None, False])
+def test_request_source_disabled(
+    sentry_init, capture_events, enable_http_request_source
+):
+    sentry_options = {
+        "traces_sample_rate": 1.0,
+        "http_request_source_threshold_ms": 0,
+    }
+    if enable_http_request_source is not None:
+        sentry_options["enable_http_request_source"] = enable_http_request_source
+
+    sentry_init(**sentry_options)
+
+    events = capture_events()
+
+    with start_transaction(name="foo"):
+        conn = HTTPConnection("localhost", port=PORT)
+        conn.request("GET", "/foo")
+        conn.getresponse()
+
+    (event,) = events
+
+    span = event["spans"][-1]
+    assert span["description"].startswith("GET")
+
+    data = span.get("data", {})
+
+    assert SPANDATA.CODE_LINENO not in data
+    assert SPANDATA.CODE_NAMESPACE not in data
+    assert SPANDATA.CODE_FILEPATH not in data
+    assert SPANDATA.CODE_FUNCTION not in data
+
+
+def test_request_source_enabled(sentry_init, capture_events):
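+    """
+    Outgoing HTTP request spans should carry source location data
+    (file path, line number, namespace, function) once
+    `enable_http_request_source` is explicitly turned on.
+    """
+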
sentry_options = { + "traces_sample_rate": 1.0, + "enable_http_request_source": True, + "http_request_source_threshold_ms": 0, + } + sentry_init(**sentry_options) + + events = capture_events() + + with start_transaction(name="foo"): + conn = HTTPConnection("localhost", port=PORT) + conn.request("GET", "/foo") + conn.getresponse() + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + +def test_request_source(sentry_init, capture_events): + sentry_init( + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + events = capture_events() + + with start_transaction(name="foo"): + conn = HTTPConnection("localhost", port=PORT) + conn.request("GET", "/foo") + conn.getresponse() + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.stdlib.test_httplib" + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/stdlib/test_httplib.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "test_request_source" + + +def test_request_source_with_module_in_search_path(sentry_init, capture_events): + """ + Test that request source is relative to the path of the module it ran in + """ + sentry_init( + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=0, + ) + + events = capture_events() + + with start_transaction(name="foo"): + from httplib_helpers.helpers import get_request_with_connection + + conn = HTTPConnection("localhost", port=PORT) + get_request_with_connection(conn, "/foo") + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "httplib_helpers.helpers" + assert data.get(SPANDATA.CODE_FILEPATH) == "httplib_helpers/helpers.py" + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert data.get(SPANDATA.CODE_FUNCTION) == "get_request_with_connection" + + +def test_no_request_source_if_duration_too_short(sentry_init, capture_events): + sentry_init( + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + already_patched_putrequest = HTTPConnection.putrequest + + class HttpConnectionWithPatchedSpan(HTTPConnection): + def putrequest(self, *args, **kwargs) -> None: + already_patched_putrequest(self, *args, **kwargs) + span = self._sentrysdk_span # type: ignore + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=99999) + + 
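+    # The patched putrequest pins the span duration to 99.999 ms, just under
+    # the 100 ms http_request_source_threshold_ms, so the span below should
+    # carry no source location data.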
events = capture_events() + + with start_transaction(name="foo"): + conn = HttpConnectionWithPatchedSpan("localhost", port=PORT) + conn.request("GET", "/foo") + conn.getresponse() + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO not in data + assert SPANDATA.CODE_NAMESPACE not in data + assert SPANDATA.CODE_FILEPATH not in data + assert SPANDATA.CODE_FUNCTION not in data + + +def test_request_source_if_duration_over_threshold(sentry_init, capture_events): + sentry_init( + traces_sample_rate=1.0, + enable_http_request_source=True, + http_request_source_threshold_ms=100, + ) + + already_patched_putrequest = HTTPConnection.putrequest + + class HttpConnectionWithPatchedSpan(HTTPConnection): + def putrequest(self, *args, **kwargs) -> None: + already_patched_putrequest(self, *args, **kwargs) + span = self._sentrysdk_span # type: ignore + span.start_timestamp = datetime.datetime(2024, 1, 1, microsecond=0) + span.timestamp = datetime.datetime(2024, 1, 1, microsecond=100001) + + events = capture_events() + + with start_transaction(name="foo"): + conn = HttpConnectionWithPatchedSpan("localhost", port=PORT) + conn.request("GET", "/foo") + conn.getresponse() + + (event,) = events + + span = event["spans"][-1] + assert span["description"].startswith("GET") + + data = span.get("data", {}) + + assert SPANDATA.CODE_LINENO in data + assert SPANDATA.CODE_NAMESPACE in data + assert SPANDATA.CODE_FILEPATH in data + assert SPANDATA.CODE_FUNCTION in data + + assert type(data.get(SPANDATA.CODE_LINENO)) == int + assert data.get(SPANDATA.CODE_LINENO) > 0 + assert data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.stdlib.test_httplib" + assert data.get(SPANDATA.CODE_FILEPATH).endswith( + "tests/integrations/stdlib/test_httplib.py" + ) + + is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + assert is_relative_path + + assert ( + data.get(SPANDATA.CODE_FUNCTION) + == "test_request_source_if_duration_over_threshold" + ) + + def test_span_origin(sentry_init, capture_events): sentry_init(traces_sample_rate=1.0, debug=True) events = capture_events() diff --git a/tests/integrations/threading/test_threading.py b/tests/integrations/threading/test_threading.py index 799298910b..9c9a24aa63 100644 --- a/tests/integrations/threading/test_threading.py +++ b/tests/integrations/threading/test_threading.py @@ -276,3 +276,64 @@ def do_some_work(number): - op="outer-submit-4": description="Thread: main"\ """ ) + + +@pytest.mark.parametrize( + "propagate_scope", + (True, False), + ids=["propagate_scope=True", "propagate_scope=False"], +) +def test_spans_from_threadpool( + sentry_init, capture_events, render_span_tree, propagate_scope +): + sentry_init( + traces_sample_rate=1.0, + integrations=[ThreadingIntegration(propagate_scope=propagate_scope)], + ) + events = capture_events() + + def do_some_work(number): + with sentry_sdk.start_span( + op=f"inner-run-{number}", name=f"Thread: child-{number}" + ): + pass + + with sentry_sdk.start_transaction(op="outer-trx"): + with futures.ThreadPoolExecutor(max_workers=1) as executor: + for number in range(5): + with sentry_sdk.start_span( + op=f"outer-submit-{number}", name="Thread: main" + ): + future = executor.submit(do_some_work, number) + future.result() + + (event,) = events + + if propagate_scope: + assert render_span_tree(event) == dedent( + """\ + - op="outer-trx": description=null + - op="outer-submit-0": description="Thread: main" + - op="inner-run-0": 
description="Thread: child-0" + - op="outer-submit-1": description="Thread: main" + - op="inner-run-1": description="Thread: child-1" + - op="outer-submit-2": description="Thread: main" + - op="inner-run-2": description="Thread: child-2" + - op="outer-submit-3": description="Thread: main" + - op="inner-run-3": description="Thread: child-3" + - op="outer-submit-4": description="Thread: main" + - op="inner-run-4": description="Thread: child-4"\ +""" + ) + + elif not propagate_scope: + assert render_span_tree(event) == dedent( + """\ + - op="outer-trx": description=null + - op="outer-submit-0": description="Thread: main" + - op="outer-submit-1": description="Thread: main" + - op="outer-submit-2": description="Thread: main" + - op="outer-submit-3": description="Thread: main" + - op="outer-submit-4": description="Thread: main"\ +""" + ) diff --git a/tests/test_envelope.py b/tests/test_envelope.py index 06f8971dc3..d66cd9460a 100644 --- a/tests/test_envelope.py +++ b/tests/test_envelope.py @@ -252,7 +252,6 @@ def test_envelope_item_data_category_mapping(): ("client_report", "internal"), ("profile", "profile"), ("profile_chunk", "profile_chunk"), - ("statsd", "metric_bucket"), ("check_in", "monitor"), ("unknown_type", "default"), ] diff --git a/tests/test_logs.py b/tests/test_logs.py index 596a31922e..1e252c5bfb 100644 --- a/tests/test_logs.py +++ b/tests/test_logs.py @@ -230,7 +230,8 @@ def test_logs_attributes(sentry_init, capture_envelopes): for k, v in attrs.items(): assert logs[0]["attributes"][k] == v assert logs[0]["attributes"]["sentry.environment"] == "production" - assert "sentry.release" in logs[0]["attributes"] + if sentry_sdk.get_client().options.get("release") is not None: + assert "sentry.release" in logs[0]["attributes"] assert logs[0]["attributes"]["sentry.message.parameter.my_var"] == "some value" assert logs[0]["attributes"][SPANDATA.SERVER_ADDRESS] == "test-server" assert logs[0]["attributes"]["sentry.sdk.name"].startswith("sentry.python") diff --git a/tests/test_metrics.py b/tests/test_metrics.py index c02f075288..5e774227fd 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -1,971 +1,208 @@ +import json import sys -import time -import linecache -from unittest import mock - +from typing import List, Any, Mapping import pytest import sentry_sdk -from sentry_sdk import metrics -from sentry_sdk.tracing import TransactionSource -from sentry_sdk.envelope import parse_json - -try: - import gevent -except ImportError: - gevent = None - - -minimum_python_37_with_gevent = pytest.mark.skipif( - gevent and sys.version_info < (3, 7), - reason="Require Python 3.7 or higher with gevent", -) - - -def parse_metrics(bytes): - rv = [] - for line in bytes.splitlines(): - pieces = line.decode("utf-8").split("|") - payload = pieces[0].split(":") - name = payload[0] - values = payload[1:] - ty = pieces[1] - ts = None - tags = {} - for piece in pieces[2:]: - if piece[0] == "#": - for pair in piece[1:].split(","): - k, v = pair.split(":", 1) - old = tags.get(k) - if old is not None: - if isinstance(old, list): - old.append(v) - else: - tags[k] = [old, v] - else: - tags[k] = v - elif piece[0] == "T": - ts = int(piece[1:]) - else: - raise ValueError("unknown piece %r" % (piece,)) - rv.append((ts, name, ty, values, tags)) - rv.sort(key=lambda x: (x[0], x[1], tuple(sorted(tags.items())))) - return rv - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_increment(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release", - 
environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) - ts = time.time() - envelopes = capture_envelopes() +from sentry_sdk import _metrics +from sentry_sdk import get_client +from sentry_sdk.envelope import Envelope +from sentry_sdk.types import Metric - metrics.increment("foobar", 1.0, tags={"foo": "bar", "blub": "blah"}, timestamp=ts) - # python specific alias - metrics.incr("foobar", 2.0, tags={"foo": "bar", "blub": "blah"}, timestamp=ts) - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "foobar@none" - assert m[0][2] == "c" - assert m[0][3] == ["3.0"] - assert m[0][4] == { - "blub": "blah", - "foo": "bar", - "release": "fun-release", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - assert parse_json(meta_item.payload.get_bytes()) == { - "timestamp": mock.ANY, - "mapping": { - "c:foobar@none": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ] - }, - } +def envelopes_to_metrics(envelopes): + # type: (List[Envelope]) -> List[Metric] + res = [] # type: List[Metric] + for envelope in envelopes: + for item in envelope.items: + if item.type == "trace_metric": + for metric_json in item.payload.json["items"]: + metric = { + "timestamp": metric_json["timestamp"], + "trace_id": metric_json["trace_id"], + "span_id": metric_json.get("span_id"), + "name": metric_json["name"], + "type": metric_json["type"], + "value": metric_json["value"], + "unit": metric_json.get("unit"), + "attributes": { + k: v["value"] + for (k, v) in metric_json["attributes"].items() + }, + } # type: Metric + res.append(metric) + return res -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_timing(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) - ts = time.time() - envelopes = capture_envelopes() - with metrics.timing("whatever", tags={"blub": "blah"}, timestamp=ts): - time.sleep(0.1) - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "whatever@second" - assert m[0][2] == "d" - assert len(m[0][3]) == 1 - assert float(m[0][3][0]) >= 0.1 - assert m[0][4] == { - "blub": "blah", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - json = parse_json(meta_item.payload.get_bytes()) - assert json == { - "timestamp": mock.ANY, - "mapping": { - "d:whatever@second": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ] - }, - } - - loc = json["mapping"]["d:whatever@second"][0] - line = linecache.getline(loc["abs_path"], loc["lineno"]) - assert ( - line.strip() - == 'with metrics.timing("whatever", 
tags={"blub": "blah"}, timestamp=ts):' - ) +def test_metrics_disabled_by_default(sentry_init, capture_envelopes): + sentry_init() - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_timing_decorator( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) envelopes = capture_envelopes() - @metrics.timing("whatever-1", tags={"x": "y"}) - def amazing(): - time.sleep(0.1) - return 42 - - @metrics.timing("whatever-2", tags={"x": "y"}, unit="nanosecond") - def amazing_nano(): - time.sleep(0.01) - return 23 - - assert amazing() == 42 - assert amazing_nano() == 23 - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 2 - assert m[0][1] == "whatever-1@second" - assert m[0][2] == "d" - assert len(m[0][3]) == 1 - assert float(m[0][3][0]) >= 0.1 - assert m[0][4] == { - "x": "y", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert m[1][1] == "whatever-2@nanosecond" - assert m[1][2] == "d" - assert len(m[1][3]) == 1 - assert float(m[1][3][0]) >= 10000000.0 - assert m[1][4] == { - "x": "y", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - json = parse_json(meta_item.payload.get_bytes()) - assert json == { - "timestamp": mock.ANY, - "mapping": { - "d:whatever-1@second": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ], - "d:whatever-2@nanosecond": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ], - }, - } + _metrics.count("test.counter", 1) + _metrics.gauge("test.gauge", 42) + _metrics.distribution("test.distribution", 200) - # XXX: this is not the best location. It would probably be better to - # report the location in the function, however that is quite a bit - # tricker to do since we report from outside the function so we really - # only see the callsite. 
- loc = json["mapping"]["d:whatever-1@second"][0] - line = linecache.getline(loc["abs_path"], loc["lineno"]) - assert line.strip() == "assert amazing() == 42" + assert len(envelopes) == 0 -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_timing_basic(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) - ts = time.time() +def test_metrics_basics(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_metrics": True}) envelopes = capture_envelopes() - metrics.timing("timing", 1.0, tags={"a": "b"}, timestamp=ts) - metrics.timing("timing", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.timing("timing", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.timing("timing", 3.0, tags={"a": "b"}, timestamp=ts) - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "timing@second" - assert m[0][2] == "d" - assert len(m[0][3]) == 4 - assert sorted(map(float, m[0][3])) == [1.0, 2.0, 2.0, 3.0] - assert m[0][4] == { - "a": "b", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - assert parse_json(meta_item.payload.get_bytes()) == { - "timestamp": mock.ANY, - "mapping": { - "d:timing@second": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ] - }, - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_distribution(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) - ts = time.time() - envelopes = capture_envelopes() + _metrics.count("test.counter", 1) + _metrics.gauge("test.gauge", 42, unit="millisecond") + _metrics.distribution("test.distribution", 200, unit="second") - metrics.distribution("dist", 1.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 3.0, tags={"a": "b"}, timestamp=ts) - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "dist@none" - assert m[0][2] == "d" - assert len(m[0][3]) == 4 - assert sorted(map(float, m[0][3])) == [1.0, 2.0, 2.0, 3.0] - assert m[0][4] == { - "a": "b", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - json = parse_json(meta_item.payload.get_bytes()) - assert json == { - "timestamp": mock.ANY, - "mapping": { - "d:dist@none": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ] - }, - } + get_client().flush() + metrics = 
envelopes_to_metrics(envelopes) - loc = json["mapping"]["d:dist@none"][0] - line = linecache.getline(loc["abs_path"], loc["lineno"]) - assert ( - line.strip() - == 'metrics.distribution("dist", 1.0, tags={"a": "b"}, timestamp=ts)' - ) + assert len(metrics) == 3 + assert metrics[0]["name"] == "test.counter" + assert metrics[0]["type"] == "counter" + assert metrics[0]["value"] == 1.0 + assert metrics[0]["unit"] is None + assert "sentry.sdk.name" in metrics[0]["attributes"] + assert "sentry.sdk.version" in metrics[0]["attributes"] -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_set(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": True}, - ) - ts = time.time() - envelopes = capture_envelopes() + assert metrics[1]["name"] == "test.gauge" + assert metrics[1]["type"] == "gauge" + assert metrics[1]["value"] == 42.0 + assert metrics[1]["unit"] == "millisecond" - metrics.set("my-set", "peter", tags={"magic": "puff"}, timestamp=ts) - metrics.set("my-set", "paul", tags={"magic": "puff"}, timestamp=ts) - metrics.set("my-set", "mary", tags={"magic": "puff"}, timestamp=ts) - sentry_sdk.flush() - - (envelope,) = envelopes - statsd_item, meta_item = envelope.items - - assert statsd_item.headers["type"] == "statsd" - m = parse_metrics(statsd_item.payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "my-set@none" - assert m[0][2] == "s" - assert len(m[0][3]) == 3 - assert sorted(map(int, m[0][3])) == [354582103, 2513273657, 3329318813] - assert m[0][4] == { - "magic": "puff", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert meta_item.headers["type"] == "metric_meta" - assert parse_json(meta_item.payload.get_bytes()) == { - "timestamp": mock.ANY, - "mapping": { - "s:my-set@none": [ - { - "type": "location", - "filename": "tests/test_metrics.py", - "abs_path": __file__, - "function": sys._getframe().f_code.co_name, - "module": __name__, - "lineno": mock.ANY, - "pre_context": mock.ANY, - "context_line": mock.ANY, - "post_context": mock.ANY, - } - ] - }, - } + assert metrics[2]["name"] == "test.distribution" + assert metrics[2]["type"] == "distribution" + assert metrics[2]["value"] == 200.0 + assert metrics[2]["unit"] == "second" -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_gauge(sentry_init, capture_envelopes, maybe_monkeypatched_threading): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": False}, - ) - ts = time.time() +def test_metrics_experimental_option(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_metrics": True}) envelopes = capture_envelopes() - metrics.gauge("my-gauge", 10.0, tags={"x": "y"}, timestamp=ts) - metrics.gauge("my-gauge", 20.0, tags={"x": "y"}, timestamp=ts) - metrics.gauge("my-gauge", 30.0, tags={"x": "y"}, timestamp=ts) - sentry_sdk.flush() + _metrics.count("test.counter", 5) - (envelope,) = envelopes + get_client().flush() - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 - assert len(m) == 1 - assert m[0][1] == "my-gauge@none" - assert m[0][2] == "g" - assert len(m[0][3]) == 5 - assert list(map(float, m[0][3])) == [30.0, 10.0, 30.0, 60.0, 3.0] - assert m[0][4] == { - "x": "y", - 
"release": "fun-release@1.0.0", - "environment": "not-fun-env", - } + assert metrics[0]["name"] == "test.counter" + assert metrics[0]["type"] == "counter" + assert metrics[0]["value"] == 5.0 -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_multiple(sentry_init, capture_envelopes): +def test_metrics_with_attributes(sentry_init, capture_envelopes): sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": False}, + _experiments={"enable_metrics": True}, release="1.0.0", environment="test" ) - ts = time.time() envelopes = capture_envelopes() - metrics.gauge("my-gauge", 10.0, tags={"x": "y"}, timestamp=ts) - metrics.gauge("my-gauge", 20.0, tags={"x": "y"}, timestamp=ts) - metrics.gauge("my-gauge", 30.0, tags={"x": "y"}, timestamp=ts) - for _ in range(10): - metrics.increment("counter-1", 1.0, timestamp=ts) - metrics.increment("counter-2", 1.0, timestamp=ts) - - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 3 - - assert m[0][1] == "counter-1@none" - assert m[0][2] == "c" - assert list(map(float, m[0][3])) == [10.0] - assert m[0][4] == { - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert m[1][1] == "counter-2@none" - assert m[1][2] == "c" - assert list(map(float, m[1][3])) == [1.0] - assert m[1][4] == { - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert m[2][1] == "my-gauge@none" - assert m[2][2] == "g" - assert len(m[2][3]) == 5 - assert list(map(float, m[2][3])) == [30.0, 10.0, 30.0, 60.0, 3.0] - assert m[2][4] == { - "x": "y", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_transaction_name( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": False}, + _metrics.count( + "test.counter", 1, attributes={"endpoint": "/api/test", "status": "success"} ) - ts = time.time() - envelopes = capture_envelopes() - sentry_sdk.get_current_scope().set_transaction_name( - "/user/{user_id}", source=TransactionSource.ROUTE - ) - metrics.distribution("dist", 1.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 2.0, tags={"a": "b"}, timestamp=ts) - metrics.distribution("dist", 3.0, tags={"a": "b"}, timestamp=ts) - - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "dist@none" - assert m[0][2] == "d" - assert len(m[0][3]) == 4 - assert sorted(map(float, m[0][3])) == [1.0, 2.0, 2.0, 3.0] - assert m[0][4] == { - "a": "b", - "transaction": "/user/{user_id}", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_metric_summaries( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - enable_tracing=True, - ) - ts = time.time() - envelopes = capture_envelopes() + get_client().flush() - with sentry_sdk.start_transaction( - 
op="stuff", name="/foo", source=TransactionSource.ROUTE - ) as transaction: - metrics.increment("root-counter", timestamp=ts) - with metrics.timing("my-timer-metric", tags={"a": "b"}, timestamp=ts): - for x in range(10): - metrics.distribution("my-dist", float(x), timestamp=ts) - - sentry_sdk.flush() - - (transaction, envelope) = envelopes - - # Metrics Emission - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 3 - - assert m[0][1] == "my-dist@none" - assert m[0][2] == "d" - assert len(m[0][3]) == 10 - assert sorted(m[0][3]) == list(map(str, map(float, range(10)))) - assert m[0][4] == { - "transaction": "/foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert m[1][1] == "my-timer-metric@second" - assert m[1][2] == "d" - assert len(m[1][3]) == 1 - assert m[1][4] == { - "a": "b", - "transaction": "/foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - assert m[2][1] == "root-counter@none" - assert m[2][2] == "c" - assert m[2][3] == ["1.0"] - assert m[2][4] == { - "transaction": "/foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - # Measurement Attachment - t = transaction.items[0].get_transaction_event() - - assert t["_metrics_summary"] == { - "c:root-counter@none": [ - { - "count": 1, - "min": 1.0, - "max": 1.0, - "sum": 1.0, - "tags": { - "transaction": "/foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - }, - } - ] - } - - assert t["spans"][0]["_metrics_summary"]["d:my-dist@none"] == [ - { - "count": 10, - "min": 0.0, - "max": 9.0, - "sum": 45.0, - "tags": { - "environment": "not-fun-env", - "release": "fun-release@1.0.0", - "transaction": "/foo", - }, - } - ] - - assert t["spans"][0]["tags"] == {"a": "b"} - (timer,) = t["spans"][0]["_metrics_summary"]["d:my-timer-metric@second"] - assert timer["count"] == 1 - assert timer["max"] == timer["min"] == timer["sum"] - assert timer["sum"] > 0 - assert timer["tags"] == { - "a": "b", - "environment": "not-fun-env", - "release": "fun-release@1.0.0", - "transaction": "/foo", - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -@pytest.mark.parametrize( - "metric_name,metric_unit,expected_name", - [ - ("first-metric", "nano-second", "first-metric@nanosecond"), - ("another_metric?", "nano second", "another_metric_@nanosecond"), - ( - "metric", - "nanosecond", - "metric@nanosecond", - ), - ( - "my.amaze.metric I guess", - "nano|\nsecond", - "my.amaze.metric_I_guess@nanosecond", - ), - ("métríc", "nanöseconď", "m_tr_c@nansecon"), - ], -) -def test_metric_name_normalization( - sentry_init, - capture_envelopes, - metric_name, - metric_unit, - expected_name, - maybe_monkeypatched_threading, -): - sentry_init( - _experiments={"enable_metrics": True, "metric_code_locations": False}, - ) - envelopes = capture_envelopes() + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 - metrics.distribution(metric_name, 1.0, unit=metric_unit) - - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - - parsed_metrics = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(parsed_metrics) == 1 - - name = parsed_metrics[0][1] - assert name == expected_name - - -@minimum_python_37_with_gevent -@pytest.mark.forked -@pytest.mark.parametrize( - "metric_tag,expected_tag", - [ - ({"f-oo|bar": "%$foo/"}, {"f-oobar": "%$foo/"}), - ({"foo$.$.$bar": "blah{}"}, 
{"foo..bar": "blah{}"}), - ( - {"foö-bar": "snöwmän"}, - {"fo-bar": "snöwmän"}, - ), - ({"route": "GET /foo"}, {"route": "GET /foo"}), - ({"__bar__": "this | or , that"}, {"__bar__": "this \\u{7c} or \\u{2c} that"}), - ({"foo/": "hello!\n\r\t\\"}, {"foo/": "hello!\\n\\r\\t\\\\"}), - ], -) -def test_metric_tag_normalization( - sentry_init, - capture_envelopes, - metric_tag, - expected_tag, - maybe_monkeypatched_threading, -): - sentry_init( - _experiments={"enable_metrics": True, "metric_code_locations": False}, - ) - envelopes = capture_envelopes() - - metrics.distribution("a", 1.0, tags=metric_tag) - - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - - parsed_metrics = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(parsed_metrics) == 1 - - tags = parsed_metrics[0][4] - - expected_tag_key, expected_tag_value = expected_tag.popitem() - assert expected_tag_key in tags - assert tags[expected_tag_key] == expected_tag_value - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_before_emit_metric( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - def before_emit(key, value, unit, tags): - if key == "removed-metric" or value == 47 or unit == "unsupported": - return False + assert metrics[0]["attributes"]["endpoint"] == "/api/test" + assert metrics[0]["attributes"]["status"] == "success" + assert metrics[0]["attributes"]["sentry.release"] == "1.0.0" + assert metrics[0]["attributes"]["sentry.environment"] == "test" - tags["extra"] = "foo" - del tags["release"] - # this better be a noop! - metrics.increment("shitty-recursion") - return True - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={ - "enable_metrics": True, - "metric_code_locations": False, - "before_emit_metric": before_emit, - }, - ) +def test_metrics_with_user(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_metrics": True}) envelopes = capture_envelopes() - metrics.increment("removed-metric", 1.0) - metrics.increment("another-removed-metric", 47) - metrics.increment("yet-another-removed-metric", 1.0, unit="unsupported") - metrics.increment("actual-metric", 1.0) - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "actual-metric@none" - assert m[0][3] == ["1.0"] - assert m[0][4] == { - "extra": "foo", - "environment": "not-fun-env", - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_aggregator_flush( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - _experiments={ - "enable_metrics": True, - }, + sentry_sdk.set_user( + {"id": "user-123", "email": "test@example.com", "username": "testuser"} ) - envelopes = capture_envelopes() + _metrics.count("test.user.counter", 1) - metrics.increment("a-metric", 1.0) - sentry_sdk.flush() + get_client().flush() - assert len(envelopes) == 1 - assert sentry_sdk.get_client().metrics_aggregator.buckets == {} + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 + assert metrics[0]["attributes"]["user.id"] == "user-123" + assert metrics[0]["attributes"]["user.email"] == "test@example.com" + assert metrics[0]["attributes"]["user.name"] == "testuser" -@minimum_python_37_with_gevent 
-@pytest.mark.forked -def test_tag_serialization( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release", - environment="not-fun-env", - _experiments={"enable_metrics": True, "metric_code_locations": False}, - ) - envelopes = capture_envelopes() - metrics.increment( - "counter", - tags={ - "no-value": None, - "an-int": 42, - "a-float": 23.0, - "a-string": "blah", - "more-than-one": [1, "zwei", "3.0", None], - }, - ) - sentry_sdk.flush() - - (envelope,) = envelopes - - assert len(envelope.items) == 1 - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 1 - assert m[0][4] == { - "an-int": "42", - "a-float": "23.0", - "a-string": "blah", - "more-than-one": ["1", "3.0", "zwei"], - "release": "fun-release", - "environment": "not-fun-env", - } - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_flush_recursion_protection( - sentry_init, capture_envelopes, monkeypatch, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release", - environment="not-fun-env", - _experiments={"enable_metrics": True}, - ) +def test_metrics_with_span(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_metrics": True}, traces_sample_rate=1.0) envelopes = capture_envelopes() - test_client = sentry_sdk.get_client() - real_capture_envelope = test_client.transport.capture_envelope + with sentry_sdk.start_transaction(op="test", name="test-span"): + _metrics.count("test.span.counter", 1) - def bad_capture_envelope(*args, **kwargs): - metrics.increment("bad-metric") - return real_capture_envelope(*args, **kwargs) + get_client().flush() - monkeypatch.setattr(test_client.transport, "capture_envelope", bad_capture_envelope) + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 - metrics.increment("counter") + assert metrics[0]["trace_id"] is not None + assert metrics[0]["trace_id"] != "00000000-0000-0000-0000-000000000000" + assert metrics[0]["span_id"] is not None - # flush twice to see the inner metric - sentry_sdk.flush() - sentry_sdk.flush() - (envelope,) = envelopes - m = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(m) == 1 - assert m[0][1] == "counter@none" +def test_metrics_tracing_without_performance(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_metrics": True}) + envelopes = capture_envelopes() + _metrics.count("test.span.counter", 1) -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_flush_recursion_protection_background_flush( - sentry_init, capture_envelopes, monkeypatch, maybe_monkeypatched_threading -): - monkeypatch.setattr(metrics.MetricsAggregator, "FLUSHER_SLEEP_TIME", 0.01) - sentry_init( - release="fun-release", - environment="not-fun-env", - _experiments={"enable_metrics": True}, - ) - envelopes = capture_envelopes() - test_client = sentry_sdk.get_client() + get_client().flush() - real_capture_envelope = test_client.transport.capture_envelope + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 - def bad_capture_envelope(*args, **kwargs): - metrics.increment("bad-metric") - return real_capture_envelope(*args, **kwargs) + assert metrics[0]["trace_id"] is not None + assert metrics[0]["trace_id"] != "00000000-0000-0000-0000-000000000000" + assert metrics[0]["span_id"] is None - monkeypatch.setattr(test_client.transport, "capture_envelope", bad_capture_envelope) - metrics.increment("counter") +def test_metrics_before_send(sentry_init, 
capture_envelopes): + before_metric_called = False - # flush via sleep and flag - sentry_sdk.get_client().metrics_aggregator._force_flush = True - time.sleep(0.5) + def _before_metric(record, hint): + nonlocal before_metric_called - (envelope,) = envelopes - m = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(m) == 1 - assert m[0][1] == "counter@none" + assert set(record.keys()) == { + "timestamp", + "trace_id", + "span_id", + "name", + "type", + "value", + "unit", + "attributes", + } + if record["name"] == "test.skip": + return None -@pytest.mark.skipif( - not gevent or sys.version_info >= (3, 7), - reason="Python 3.6 or lower and gevent required", -) -@pytest.mark.forked -def test_disable_metrics_for_old_python_with_gevent( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - if maybe_monkeypatched_threading != "greenlet": - pytest.skip("Test specifically for gevent/greenlet") + before_metric_called = True + return record sentry_init( - release="fun-release", - environment="not-fun-env", - _experiments={"enable_metrics": True}, + _experiments={ + "enable_metrics": True, + "before_send_metric": _before_metric, + }, ) envelopes = capture_envelopes() - metrics.incr("counter") + _metrics.count("test.skip", 1) + _metrics.count("test.keep", 1) - sentry_sdk.flush() + get_client().flush() - assert sentry_sdk.get_client().metrics_aggregator is None - assert not envelopes + metrics = envelopes_to_metrics(envelopes) + assert len(metrics) == 1 + assert metrics[0]["name"] == "test.keep" + assert before_metric_called diff --git a/tests/test_scope.py b/tests/test_scope.py index e645d84234..68c93f3036 100644 --- a/tests/test_scope.py +++ b/tests/test_scope.py @@ -908,6 +908,7 @@ def test_last_event_id_cleared(sentry_init): @pytest.mark.tests_internal_exceptions +@pytest.mark.parametrize("error_cls", [LookupError, ValueError]) @pytest.mark.parametrize( "scope_manager", [ @@ -915,10 +916,10 @@ def test_last_event_id_cleared(sentry_init): use_scope, ], ) -def test_handle_lookup_error_on_token_reset_current_scope(scope_manager): +def test_handle_error_on_token_reset_current_scope(error_cls, scope_manager): with mock.patch("sentry_sdk.scope.capture_internal_exception") as mock_capture: with mock.patch("sentry_sdk.scope._current_scope") as mock_token_var: - mock_token_var.reset.side_effect = LookupError() + mock_token_var.reset.side_effect = error_cls() mock_token = mock.Mock() mock_token_var.set.return_value = mock_token @@ -932,13 +933,14 @@ def test_handle_lookup_error_on_token_reset_current_scope(scope_manager): pass except Exception: - pytest.fail("Context manager should handle LookupError gracefully") + pytest.fail(f"Context manager should handle {error_cls} gracefully") mock_capture.assert_called_once() mock_token_var.reset.assert_called_once_with(mock_token) @pytest.mark.tests_internal_exceptions +@pytest.mark.parametrize("error_cls", [LookupError, ValueError]) @pytest.mark.parametrize( "scope_manager", [ @@ -946,13 +948,13 @@ def test_handle_lookup_error_on_token_reset_current_scope(scope_manager): use_isolation_scope, ], ) -def test_handle_lookup_error_on_token_reset_isolation_scope(scope_manager): +def test_handle_error_on_token_reset_isolation_scope(error_cls, scope_manager): with mock.patch("sentry_sdk.scope.capture_internal_exception") as mock_capture: with mock.patch("sentry_sdk.scope._current_scope") as mock_current_scope: with mock.patch( "sentry_sdk.scope._isolation_scope" ) as mock_isolation_scope: - mock_isolation_scope.reset.side_effect = LookupError() + 
mock_isolation_scope.reset.side_effect = error_cls() mock_current_token = mock.Mock() mock_current_scope.set.return_value = mock_current_token @@ -965,7 +967,7 @@ def test_handle_lookup_error_on_token_reset_isolation_scope(scope_manager): pass except Exception: - pytest.fail("Context manager should handle LookupError gracefully") + pytest.fail(f"Context manager should handle {error_cls} gracefully") mock_capture.assert_called_once() mock_current_scope.reset.assert_called_once_with(mock_current_token) diff --git a/tests/test_transport.py b/tests/test_transport.py index 68669fa24d..804105b010 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -590,43 +590,6 @@ def test_complex_limits_without_data_category( assert len(capturing_server.captured) == 0 -@pytest.mark.parametrize("response_code", [200, 429]) -def test_metric_bucket_limits(capturing_server, response_code, make_client): - client = make_client() - capturing_server.respond_with( - code=response_code, - headers={ - "X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded:custom" - }, - ) - - envelope = Envelope() - envelope.add_item(Item(payload=b"{}", type="statsd")) - client.transport.capture_envelope(envelope) - client.flush() - - assert len(capturing_server.captured) == 1 - assert capturing_server.captured[0].path == "/api/132/envelope/" - capturing_server.clear_captured() - - assert set(client.transport._disabled_until) == {"metric_bucket"} - - client.transport.capture_envelope(envelope) - client.capture_event({"type": "transaction"}) - client.flush() - - assert len(capturing_server.captured) == 2 - - envelope = capturing_server.captured[0].envelope - assert envelope.items[0].type == "transaction" - envelope = capturing_server.captured[1].envelope - assert envelope.items[0].type == "client_report" - report = parse_json(envelope.items[0].get_bytes()) - assert report["discarded_events"] == [ - {"category": "metric_bucket", "reason": "ratelimit_backoff", "quantity": 1}, - ] - - @pytest.mark.parametrize("response_code", [200, 429]) def test_log_item_limits(capturing_server, response_code, make_client): client = make_client() @@ -664,80 +627,6 @@ def test_log_item_limits(capturing_server, response_code, make_client): ] -@pytest.mark.parametrize("response_code", [200, 429]) -def test_metric_bucket_limits_with_namespace( - capturing_server, response_code, make_client -): - client = make_client() - capturing_server.respond_with( - code=response_code, - headers={ - "X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded:foo" - }, - ) - - envelope = Envelope() - envelope.add_item(Item(payload=b"{}", type="statsd")) - client.transport.capture_envelope(envelope) - client.flush() - - assert len(capturing_server.captured) == 1 - assert capturing_server.captured[0].path == "/api/132/envelope/" - capturing_server.clear_captured() - - assert set(client.transport._disabled_until) == set([]) - - client.transport.capture_envelope(envelope) - client.capture_event({"type": "transaction"}) - client.flush() - - assert len(capturing_server.captured) == 2 - - envelope = capturing_server.captured[0].envelope - assert envelope.items[0].type == "statsd" - envelope = capturing_server.captured[1].envelope - assert envelope.items[0].type == "transaction" - - -@pytest.mark.parametrize("response_code", [200, 429]) -def test_metric_bucket_limits_with_all_namespaces( - capturing_server, response_code, make_client -): - client = make_client() - capturing_server.respond_with( - code=response_code, - headers={ - 
"X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded" - }, - ) - - envelope = Envelope() - envelope.add_item(Item(payload=b"{}", type="statsd")) - client.transport.capture_envelope(envelope) - client.flush() - - assert len(capturing_server.captured) == 1 - assert capturing_server.captured[0].path == "/api/132/envelope/" - capturing_server.clear_captured() - - assert set(client.transport._disabled_until) == set(["metric_bucket"]) - - client.transport.capture_envelope(envelope) - client.capture_event({"type": "transaction"}) - client.flush() - - assert len(capturing_server.captured) == 2 - - envelope = capturing_server.captured[0].envelope - assert envelope.items[0].type == "transaction" - envelope = capturing_server.captured[1].envelope - assert envelope.items[0].type == "client_report" - report = parse_json(envelope.items[0].get_bytes()) - assert report["discarded_events"] == [ - {"category": "metric_bucket", "reason": "ratelimit_backoff", "quantity": 1}, - ] - - def test_hub_cls_backwards_compat(): class TestCustomHubClass(Hub): pass diff --git a/tox.ini b/tox.ini index 2354c66c7c..f72f05e25a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,16 @@ -# Tox (http://codespeak.net/~hpk/tox/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. +# DON'T EDIT THIS FILE BY HAND. This file has been generated from a template by +# `scripts/populate_tox/populate_tox.py`. # -# This file has been generated from a template -# by "scripts/populate_tox/populate_tox.py". Any changes to the file should -# be made in the template (if you want to change a hardcoded part of the file) -# or in the script (if you want to change the auto-generated part). -# The file (and all resulting CI YAMLs) then needs to be regenerated via -# "scripts/generate-test-files.sh". +# Any changes to the test matrix should be made +# - either in the script config in `scripts/populate_tox/config.py` (if you want +# to change the auto-generated part) +# - or in the template in `scripts/populate_tox/tox.jinja` (if you want to change +# a hardcoded part of the file) +# +# This file (and all resulting CI YAMLs) then needs to be regenerated via +# `scripts/generate-test-files.sh`. +# +# See also `scripts/populate_tox/README.md` for more info. 
 [tox]
 requires =
@@ -46,15 +48,26 @@ envlist =
     # ~~~ AI ~~~
     {py3.8,py3.11,py3.12}-anthropic-v0.16.0
-    {py3.8,py3.11,py3.12}-anthropic-v0.33.1
-    {py3.8,py3.11,py3.12}-anthropic-v0.50.0
-    {py3.8,py3.12,py3.13}-anthropic-v0.68.1
+    {py3.8,py3.11,py3.12}-anthropic-v0.34.2
+    {py3.8,py3.11,py3.12}-anthropic-v0.52.2
+    {py3.8,py3.12,py3.13}-anthropic-v0.69.0

     {py3.9,py3.10,py3.11}-cohere-v5.4.0
     {py3.9,py3.11,py3.12}-cohere-v5.9.4
     {py3.9,py3.11,py3.12}-cohere-v5.13.12
     {py3.9,py3.11,py3.12}-cohere-v5.18.0

+    {py3.9,py3.12,py3.13}-google_genai-v1.29.0
+    {py3.9,py3.12,py3.13}-google_genai-v1.34.0
+    {py3.9,py3.12,py3.13}-google_genai-v1.39.1
+    {py3.9,py3.12,py3.13}-google_genai-v1.43.0
+
+    {py3.8,py3.10,py3.11}-huggingface_hub-v0.24.7
+    {py3.8,py3.12,py3.13}-huggingface_hub-v0.28.1
+    {py3.8,py3.12,py3.13}-huggingface_hub-v0.32.6
+    {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.3
+    {py3.9,py3.12,py3.13}-huggingface_hub-v1.0.0rc5
+
     {py3.9,py3.11,py3.12}-langchain-base-v0.1.20
     {py3.9,py3.11,py3.12}-langchain-base-v0.2.17
     {py3.9,py3.12,py3.13}-langchain-base-v0.3.27
@@ -63,35 +76,31 @@ envlist =
     {py3.9,py3.11,py3.12}-langchain-notiktoken-v0.2.17
     {py3.9,py3.12,py3.13}-langchain-notiktoken-v0.3.27

+    {py3.9,py3.12,py3.13}-langgraph-v0.6.10
+    {py3.10,py3.12,py3.13}-langgraph-v1.0.0a4
+
+    {py3.9,py3.12,py3.13}-litellm-v1.77.7
+    {py3.9,py3.12,py3.13}-litellm-v1.78.0
+
     {py3.8,py3.11,py3.12}-openai-base-v1.0.1
-    {py3.8,py3.11,py3.12}-openai-base-v1.37.2
-    {py3.8,py3.11,py3.12}-openai-base-v1.73.0
     {py3.8,py3.12,py3.13}-openai-base-v1.109.1
+    {py3.9,py3.12,py3.13}-openai-base-v2.3.0

     {py3.8,py3.11,py3.12}-openai-notiktoken-v1.0.1
-    {py3.8,py3.11,py3.12}-openai-notiktoken-v1.37.2
-    {py3.8,py3.11,py3.12}-openai-notiktoken-v1.73.0
     {py3.8,py3.12,py3.13}-openai-notiktoken-v1.109.1
-
-    {py3.9,py3.12,py3.13}-langgraph-v0.6.7
-    {py3.10,py3.12,py3.13}-langgraph-v1.0.0a3
+    {py3.9,py3.12,py3.13}-openai-notiktoken-v2.3.0

     {py3.10,py3.11,py3.12}-openai_agents-v0.0.19
     {py3.10,py3.12,py3.13}-openai_agents-v0.1.0
     {py3.10,py3.12,py3.13}-openai_agents-v0.2.11
-    {py3.10,py3.12,py3.13}-openai_agents-v0.3.2
-
-    {py3.8,py3.10,py3.11}-huggingface_hub-v0.24.7
-    {py3.8,py3.12,py3.13}-huggingface_hub-v0.28.1
-    {py3.8,py3.12,py3.13}-huggingface_hub-v0.32.6
-    {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.1
+    {py3.10,py3.12,py3.13}-openai_agents-v0.3.3


     # ~~~ Cloud ~~~
     {py3.6,py3.7}-boto3-v1.12.49
     {py3.6,py3.9,py3.10}-boto3-v1.20.54
     {py3.7,py3.11,py3.12}-boto3-v1.28.85
-    {py3.9,py3.12,py3.13}-boto3-v1.40.40
+    {py3.9,py3.12,py3.13}-boto3-v1.40.50

     {py3.6,py3.7,py3.8}-chalice-v1.16.0
     {py3.9,py3.12,py3.13}-chalice-v1.32.0
@@ -107,32 +116,32 @@ envlist =
     {py3.6}-pymongo-v3.5.1
     {py3.6,py3.10,py3.11}-pymongo-v3.13.0
-    {py3.9,py3.12,py3.13}-pymongo-v4.15.1
+    {py3.9,py3.12,py3.13}-pymongo-v4.15.3

     {py3.6}-redis-v2.10.6
     {py3.6,py3.7,py3.8}-redis-v3.5.3
     {py3.7,py3.10,py3.11}-redis-v4.6.0
     {py3.8,py3.11,py3.12}-redis-v5.3.1
     {py3.9,py3.12,py3.13}-redis-v6.4.0
-    {py3.9,py3.12,py3.13}-redis-v7.0.0b2
+    {py3.9,py3.12,py3.13}-redis-v7.0.0b3

     {py3.6}-redis_py_cluster_legacy-v1.3.6
     {py3.6,py3.7,py3.8}-redis_py_cluster_legacy-v2.1.3

     {py3.6,py3.8,py3.9}-sqlalchemy-v1.3.24
     {py3.6,py3.11,py3.12}-sqlalchemy-v1.4.54
-    {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.43
+    {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.44


     # ~~~ Flags ~~~
     {py3.8,py3.12,py3.13}-launchdarkly-v9.8.1
-    {py3.9,py3.12,py3.13}-launchdarkly-v9.12.0
+    {py3.9,py3.12,py3.13}-launchdarkly-v9.12.1

     {py3.8,py3.12,py3.13}-openfeature-v0.7.5
     {py3.9,py3.12,py3.13}-openfeature-v0.8.3

     {py3.7,py3.12,py3.13}-statsig-v0.55.3
-    {py3.7,py3.12,py3.13}-statsig-v0.64.0
+    {py3.7,py3.12,py3.13}-statsig-v0.65.0

     {py3.8,py3.12,py3.13}-unleash-v6.0.1
     {py3.8,py3.12,py3.13}-unleash-v6.3.0
@@ -150,7 +159,7 @@ envlist =
     {py3.8,py3.12,py3.13}-graphene-v3.4.3

     {py3.8,py3.10,py3.11}-strawberry-v0.209.8
-    {py3.9,py3.12,py3.13}-strawberry-v0.282.0
+    {py3.9,py3.12,py3.13}-strawberry-v0.283.3


     # ~~~ Network ~~~
@@ -158,6 +167,7 @@ envlist =
     {py3.7,py3.9,py3.10}-grpc-v1.47.5
     {py3.7,py3.11,py3.12}-grpc-v1.62.3
     {py3.9,py3.12,py3.13}-grpc-v1.75.1
+    {py3.9,py3.12,py3.13}-grpc-v1.76.0rc1

     {py3.6,py3.8,py3.9}-httpx-v0.16.1
     {py3.6,py3.9,py3.10}-httpx-v0.20.0
@@ -186,7 +196,7 @@ envlist =
     {py3.6,py3.11,py3.12}-huey-v2.5.3

     {py3.9,py3.10}-ray-v2.7.2
-    {py3.9,py3.12,py3.13}-ray-v2.49.2
+    {py3.9,py3.12,py3.13}-ray-v2.50.0

     {py3.6}-rq-v0.8.2
     {py3.6,py3.7}-rq-v0.13.0
@@ -202,8 +212,8 @@ envlist =
     {py3.6,py3.7}-django-v1.11.29
     {py3.6,py3.8,py3.9}-django-v2.2.28
     {py3.6,py3.9,py3.10}-django-v3.2.25
-    {py3.8,py3.11,py3.12}-django-v4.2.24
-    {py3.10,py3.12,py3.13}-django-v5.2.6
+    {py3.8,py3.11,py3.12}-django-v4.2.25
+    {py3.10,py3.12,py3.13}-django-v5.2.7
     {py3.12,py3.13}-django-v6.0a1

     {py3.6,py3.7,py3.8}-flask-v1.1.4
@@ -218,14 +228,14 @@ envlist =
     {py3.6,py3.9,py3.10}-fastapi-v0.79.1
     {py3.7,py3.10,py3.11}-fastapi-v0.92.0
     {py3.8,py3.10,py3.11}-fastapi-v0.105.0
-    {py3.8,py3.12,py3.13}-fastapi-v0.118.0
+    {py3.8,py3.12,py3.13}-fastapi-v0.119.0


     # ~~~ Web 2 ~~~
     {py3.7}-aiohttp-v3.4.4
     {py3.7,py3.8,py3.9}-aiohttp-v3.7.4
     {py3.8,py3.12,py3.13}-aiohttp-v3.10.11
-    {py3.9,py3.12,py3.13}-aiohttp-v3.12.15
+    {py3.9,py3.12,py3.13}-aiohttp-v3.13.0

     {py3.6,py3.7}-bottle-v0.12.25
     {py3.8,py3.12,py3.13}-bottle-v0.13.4
@@ -238,7 +248,7 @@ envlist =
     {py3.8,py3.10,py3.11}-litestar-v2.0.1
     {py3.8,py3.11,py3.12}-litestar-v2.6.4
     {py3.8,py3.11,py3.12}-litestar-v2.12.1
-    {py3.8,py3.12,py3.13}-litestar-v2.17.0
+    {py3.8,py3.12,py3.13}-litestar-v2.18.0

     {py3.6}-pyramid-v1.8.6
     {py3.6,py3.8,py3.9}-pyramid-v1.10.8
@@ -269,7 +279,7 @@ envlist =
     {py3.6}-trytond-v4.8.18
     {py3.6,py3.7,py3.8}-trytond-v5.8.16
     {py3.8,py3.10,py3.11}-trytond-v6.8.17
-    {py3.9,py3.12,py3.13}-trytond-v7.6.7
+    {py3.9,py3.12,py3.13}-trytond-v7.6.8

     {py3.7,py3.12,py3.13}-typer-v0.15.4
     {py3.8,py3.12,py3.13}-typer-v0.19.2
@@ -332,18 +342,32 @@ deps =
     # ~~~ AI ~~~
     anthropic-v0.16.0: anthropic==0.16.0
-    anthropic-v0.33.1: anthropic==0.33.1
-    anthropic-v0.50.0: anthropic==0.50.0
-    anthropic-v0.68.1: anthropic==0.68.1
+    anthropic-v0.34.2: anthropic==0.34.2
+    anthropic-v0.52.2: anthropic==0.52.2
+    anthropic-v0.69.0: anthropic==0.69.0
     anthropic: pytest-asyncio
     anthropic-v0.16.0: httpx<0.28.0
-    anthropic-v0.33.1: httpx<0.28.0
+    anthropic-v0.34.2: httpx<0.28.0

     cohere-v5.4.0: cohere==5.4.0
     cohere-v5.9.4: cohere==5.9.4
     cohere-v5.13.12: cohere==5.13.12
     cohere-v5.18.0: cohere==5.18.0

+    google_genai-v1.29.0: google-genai==1.29.0
+    google_genai-v1.34.0: google-genai==1.34.0
+    google_genai-v1.39.1: google-genai==1.39.1
+    google_genai-v1.43.0: google-genai==1.43.0
+    google_genai: pytest-asyncio
+
+    huggingface_hub-v0.24.7: huggingface_hub==0.24.7
+    huggingface_hub-v0.28.1: huggingface_hub==0.28.1
+    huggingface_hub-v0.32.6: huggingface_hub==0.32.6
+    huggingface_hub-v0.35.3: huggingface_hub==0.35.3
+    huggingface_hub-v1.0.0rc5: huggingface_hub==1.0.0rc5
+    huggingface_hub: responses
+    huggingface_hub: pytest-httpx
+
     langchain-base-v0.1.20: langchain==0.1.20
     langchain-base-v0.2.17: langchain==0.2.17
     langchain-base-v0.3.27: langchain==0.3.27
@@ -359,44 +383,37 @@ deps =
     langchain-notiktoken: langchain-openai
     langchain-notiktoken-v0.3.27: langchain-community

+    langgraph-v0.6.10: langgraph==0.6.10
+    langgraph-v1.0.0a4: langgraph==1.0.0a4
+
+    litellm-v1.77.7: litellm==1.77.7
+    litellm-v1.78.0: litellm==1.78.0
+
     openai-base-v1.0.1: openai==1.0.1
-    openai-base-v1.37.2: openai==1.37.2
-    openai-base-v1.73.0: openai==1.73.0
     openai-base-v1.109.1: openai==1.109.1
+    openai-base-v2.3.0: openai==2.3.0
     openai-base: pytest-asyncio
     openai-base: tiktoken
     openai-base-v1.0.1: httpx<0.28
-    openai-base-v1.37.2: httpx<0.28

     openai-notiktoken-v1.0.1: openai==1.0.1
-    openai-notiktoken-v1.37.2: openai==1.37.2
-    openai-notiktoken-v1.73.0: openai==1.73.0
     openai-notiktoken-v1.109.1: openai==1.109.1
+    openai-notiktoken-v2.3.0: openai==2.3.0
     openai-notiktoken: pytest-asyncio
     openai-notiktoken-v1.0.1: httpx<0.28
-    openai-notiktoken-v1.37.2: httpx<0.28
-
-    langgraph-v0.6.7: langgraph==0.6.7
-    langgraph-v1.0.0a3: langgraph==1.0.0a3

     openai_agents-v0.0.19: openai-agents==0.0.19
     openai_agents-v0.1.0: openai-agents==0.1.0
     openai_agents-v0.2.11: openai-agents==0.2.11
-    openai_agents-v0.3.2: openai-agents==0.3.2
+    openai_agents-v0.3.3: openai-agents==0.3.3
     openai_agents: pytest-asyncio

-    huggingface_hub-v0.24.7: huggingface_hub==0.24.7
-    huggingface_hub-v0.28.1: huggingface_hub==0.28.1
-    huggingface_hub-v0.32.6: huggingface_hub==0.32.6
-    huggingface_hub-v0.35.1: huggingface_hub==0.35.1
-    huggingface_hub: responses
-
     # ~~~ Cloud ~~~
     boto3-v1.12.49: boto3==1.12.49
     boto3-v1.20.54: boto3==1.20.54
     boto3-v1.28.85: boto3==1.28.85
-    boto3-v1.40.40: boto3==1.40.40
+    boto3-v1.40.50: boto3==1.40.50
     {py3.7,py3.8}-boto3: urllib3<2.0.0

     chalice-v1.16.0: chalice==1.16.0
@@ -415,7 +432,7 @@ deps =
     pymongo-v3.5.1: pymongo==3.5.1
     pymongo-v3.13.0: pymongo==3.13.0
-    pymongo-v4.15.1: pymongo==4.15.1
+    pymongo-v4.15.3: pymongo==4.15.3
     pymongo: mockupdb

     redis-v2.10.6: redis==2.10.6
@@ -423,7 +440,7 @@ deps =
     redis-v4.6.0: redis==4.6.0
     redis-v5.3.1: redis==5.3.1
     redis-v6.4.0: redis==6.4.0
-    redis-v7.0.0b2: redis==7.0.0b2
+    redis-v7.0.0b3: redis==7.0.0b3
     redis: fakeredis!=1.7.4
     redis: pytest<8.0.0
     redis-v4.6.0: fakeredis<2.31.0
@@ -435,18 +452,18 @@ deps =
     sqlalchemy-v1.3.24: sqlalchemy==1.3.24
     sqlalchemy-v1.4.54: sqlalchemy==1.4.54
-    sqlalchemy-v2.0.43: sqlalchemy==2.0.43
+    sqlalchemy-v2.0.44: sqlalchemy==2.0.44


     # ~~~ Flags ~~~
     launchdarkly-v9.8.1: launchdarkly-server-sdk==9.8.1
-    launchdarkly-v9.12.0: launchdarkly-server-sdk==9.12.0
+    launchdarkly-v9.12.1: launchdarkly-server-sdk==9.12.1

     openfeature-v0.7.5: openfeature-sdk==0.7.5
     openfeature-v0.8.3: openfeature-sdk==0.8.3

     statsig-v0.55.3: statsig==0.55.3
-    statsig-v0.64.0: statsig==0.64.0
+    statsig-v0.65.0: statsig==0.65.0
     statsig: typing_extensions

     unleash-v6.0.1: UnleashClient==6.0.1
@@ -473,7 +490,7 @@ deps =
     {py3.6}-graphene: aiocontextvars

     strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8
-    strawberry-v0.282.0: strawberry-graphql[fastapi,flask]==0.282.0
+    strawberry-v0.283.3: strawberry-graphql[fastapi,flask]==0.283.3
     strawberry: httpx
     strawberry-v0.209.8: pydantic<2.11
@@ -483,6 +500,7 @@ deps =
     grpc-v1.47.5: grpcio==1.47.5
     grpc-v1.62.3: grpcio==1.62.3
     grpc-v1.75.1: grpcio==1.75.1
+    grpc-v1.76.0rc1: grpcio==1.76.0rc1
     grpc: protobuf
     grpc: mypy-protobuf
     grpc: types-protobuf
@@ -527,7 +545,7 @@ deps =
     huey-v2.5.3: huey==2.5.3

     ray-v2.7.2: ray==2.7.2
-    ray-v2.49.2: ray==2.49.2
+    ray-v2.50.0: ray==2.50.0

     rq-v0.8.2: rq==0.8.2
     rq-v0.13.0: rq==0.13.0
@@ -548,8 +566,8 @@ deps =
     django-v1.11.29: django==1.11.29
     django-v2.2.28: django==2.2.28
     django-v3.2.25: django==3.2.25
-    django-v4.2.24: django==4.2.24
-    django-v5.2.6: django==5.2.6
+    django-v4.2.25: django==4.2.25
+    django-v5.2.7: django==5.2.7
     django-v6.0a1: django==6.0a1
     django: psycopg2-binary
     django: djangorestframework
@@ -557,13 +575,13 @@ deps =
     django: Werkzeug
     django-v2.2.28: channels[daphne]
     django-v3.2.25: channels[daphne]
-    django-v4.2.24: channels[daphne]
-    django-v5.2.6: channels[daphne]
+    django-v4.2.25: channels[daphne]
+    django-v5.2.7: channels[daphne]
     django-v6.0a1: channels[daphne]
     django-v2.2.28: six
     django-v3.2.25: pytest-asyncio
-    django-v4.2.24: pytest-asyncio
-    django-v5.2.6: pytest-asyncio
+    django-v4.2.25: pytest-asyncio
+    django-v5.2.7: pytest-asyncio
     django-v6.0a1: pytest-asyncio
     django-v1.11.29: djangorestframework>=3.0,<4.0
     django-v1.11.29: Werkzeug<2.1.0
@@ -599,7 +617,7 @@ deps =
     fastapi-v0.79.1: fastapi==0.79.1
     fastapi-v0.92.0: fastapi==0.92.0
     fastapi-v0.105.0: fastapi==0.105.0
-    fastapi-v0.118.0: fastapi==0.118.0
+    fastapi-v0.119.0: fastapi==0.119.0
     fastapi: httpx
     fastapi: pytest-asyncio
     fastapi: python-multipart
@@ -615,10 +633,10 @@ deps =
     aiohttp-v3.4.4: aiohttp==3.4.4
     aiohttp-v3.7.4: aiohttp==3.7.4
     aiohttp-v3.10.11: aiohttp==3.10.11
-    aiohttp-v3.12.15: aiohttp==3.12.15
+    aiohttp-v3.13.0: aiohttp==3.13.0
     aiohttp: pytest-aiohttp
     aiohttp-v3.10.11: pytest-asyncio
-    aiohttp-v3.12.15: pytest-asyncio
+    aiohttp-v3.13.0: pytest-asyncio

     bottle-v0.12.25: bottle==0.12.25
     bottle-v0.13.4: bottle==0.13.4
@@ -632,7 +650,7 @@ deps =
     litestar-v2.0.1: litestar==2.0.1
     litestar-v2.6.4: litestar==2.6.4
     litestar-v2.12.1: litestar==2.12.1
-    litestar-v2.17.0: litestar==2.17.0
+    litestar-v2.18.0: litestar==2.18.0
     litestar: pytest-asyncio
     litestar: python-multipart
     litestar: requests
@@ -694,7 +712,7 @@ deps =
     trytond-v4.8.18: trytond==4.8.18
     trytond-v5.8.16: trytond==5.8.16
     trytond-v6.8.17: trytond==6.8.17
-    trytond-v7.6.7: trytond==7.6.7
+    trytond-v7.6.8: trytond==7.6.8
     trytond: werkzeug
     trytond-v4.6.22: werkzeug<1.0
     trytond-v4.8.18: werkzeug<1.0
@@ -742,6 +760,7 @@ setenv =
     falcon: TESTPATH=tests/integrations/falcon
     fastapi: TESTPATH=tests/integrations/fastapi
     flask: TESTPATH=tests/integrations/flask
+    google_genai: TESTPATH=tests/integrations/google_genai
     gql: TESTPATH=tests/integrations/gql
     graphene: TESTPATH=tests/integrations/graphene
     grpc: TESTPATH=tests/integrations/grpc
@@ -752,6 +771,7 @@ setenv =
     langchain-notiktoken: TESTPATH=tests/integrations/langchain
     langgraph: TESTPATH=tests/integrations/langgraph
     launchdarkly: TESTPATH=tests/integrations/launchdarkly
+    litellm: TESTPATH=tests/integrations/litellm
     litestar: TESTPATH=tests/integrations/litestar
     loguru: TESTPATH=tests/integrations/loguru
     openai-base: TESTPATH=tests/integrations/openai
@@ -806,7 +826,7 @@ basepython =
     # Python version is pinned here for consistency across environments.
     # Tools like ruff and mypy have options that pin the target Python
     # version (configured in pyproject.toml), ensuring consistent behavior.
-    linters: python3.12
+    linters: python3.14

 commands =
     {py3.7,py3.8}-boto3: pip install urllib3<2.0.0
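
For reference, the `pyproject.toml` pinning that the comment in the last hunk alludes to typically looks like the sketch below. This is an illustrative excerpt with assumed values, not taken from this repository's actual `pyproject.toml`; `tool.ruff.target-version` and `tool.mypy.python_version` are the standard options for fixing the Python version each tool targets:

```toml
# Illustrative pyproject.toml excerpt (assumed values, not from this repo).
# Both tools analyze code against the pinned version regardless of which
# interpreter actually runs them, so results stay consistent even when the
# linters environment's basepython changes.

[tool.ruff]
# Lint against Python 3.12 syntax and semantics.
target-version = "py312"

[tool.mypy]
# Type-check as if the code will run on Python 3.12.
python_version = "3.12"
```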