From dd29433e719482babbe5c724e7330b1f6324abd7 Mon Sep 17 00:00:00 2001 From: Uma Annamalai Date: Tue, 10 Oct 2023 10:19:01 -0700 Subject: [PATCH 01/20] Add openai sync instrumentation. --- newrelic/config.py | 10 ++ newrelic/hooks/mlmodel_openai.py | 138 +++++++++++++++ tests/mlmodel_openai/conftest.py | 5 +- tests/mlmodel_openai/test_chat_completion.py | 169 ++++++++++++++++++- 4 files changed, 318 insertions(+), 4 deletions(-) create mode 100644 newrelic/hooks/mlmodel_openai.py diff --git a/newrelic/config.py b/newrelic/config.py index 6816c43b5b..142d4e80b7 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -2037,6 +2037,16 @@ def _process_trace_cache_import_hooks(): def _process_module_builtin_defaults(): + _process_module_definition( + "openai.api_resources.chat_completion", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_api_resources_chat_completion", + ) + _process_module_definition( + "openai.util", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_util", + ) _process_module_definition( "asyncio.base_events", "newrelic.hooks.coroutines_asyncio", diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py new file mode 100644 index 0000000000..c7afed04f5 --- /dev/null +++ b/newrelic/hooks/mlmodel_openai.py @@ -0,0 +1,138 @@ + +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import openai +import uuid +from newrelic.api.function_trace import FunctionTrace +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.api.transaction import current_transaction +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.core.config import global_settings +from newrelic.common.object_names import callable_name +from newrelic.core.attribute import MAX_LOG_MESSAGE_LENGTH + + +def wrap_chat_completion_create(wrapped, instance, args, kwargs): + transaction = current_transaction() + + if not transaction: + return + + ft_name = callable_name(wrapped) + with FunctionTrace(ft_name) as ft: + response = wrapped(*args, **kwargs) + + if not response: + return + + custom_attrs_dict = transaction._custom_params + conversation_id = custom_attrs_dict["conversation_id"] if "conversation_id" in custom_attrs_dict.keys() else str(uuid.uuid4()) + + chat_completion_id = str(uuid.uuid4()) + available_metadata = get_trace_linking_metadata() + span_id = available_metadata.get("span.id", "") + trace_id = available_metadata.get("trace.id", "") + + response_headers = getattr(response, "_nr_response_headers") + response_model = response.model + settings = transaction.settings if transaction.settings is not None else global_settings() + + chat_completion_summary_dict = { + "id": chat_completion_id, + "appName": settings.app_name, + "conversation_id": conversation_id, + "span_id": span_id, + "trace_id": trace_id, + "transaction_id": transaction._transaction_id, + "api_key_last_four_digits": f"sk-{response.api_key[-4:]}", + "response_time": ft.duration, + "request.model": kwargs.get("model") or kwargs.get("engine"), + "response.model": response_model, + "response.organization": response.organization, + "response.usage.completion_tokens": response.usage.completion_tokens, + "response.usage.total_tokens": response.usage.total_tokens, + "response.usage.prompt_tokens": response.usage.prompt_tokens, + "request.temperature": 
kwargs.get("temperature"), + "request.max_tokens": kwargs.get("max_tokens"), + "response.choices.finish_reason": response.choices[0].finish_reason, + "response.api_type": response.api_type, + "response.headers.llmVersion": response_headers.get("openai-version"), + "response.headers.ratelimitLimitRequests": check_rate_limit_header(response_headers, "x-ratelimit-limit-requests", True), + "response.headers.ratelimitLimitTokens": check_rate_limit_header(response_headers, "x-ratelimit-limit-tokens", True), + "response.headers.ratelimitResetTokens": check_rate_limit_header(response_headers, "x-ratelimit-reset-tokens", False), + "response.headers.ratelimitResetRequests": check_rate_limit_header(response_headers, "x-ratelimit-reset-requests", False), + "response.headers.ratelimitRemainingTokens": check_rate_limit_header(response_headers, "x-ratelimit-remaining-tokens", True), + "response.headers.ratelimitRemainingRequests": check_rate_limit_header(response_headers, "x-ratelimit-remaining-requests", True), + "vendor": "openAI", + "ingest_source": "Python", + "number_of_messages": len(kwargs.get("messages", [])) + len(response.choices), + "api_version": response_headers.get("openai-version") + } + + transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict) + message_list = list(kwargs.get("messages", [])) + [response.choices[0].message] + + create_chat_completion_message_event(transaction, message_list, chat_completion_id, span_id, trace_id, response_model) + + return response + + +def check_rate_limit_header(response_headers, header_name, is_int): + if header_name in response_headers: + header_value = response_headers.get(header_name) + if is_int: + header_value = int(header_value) + return header_value + else: + return None + + +def create_chat_completion_message_event(transaction, message_list, chat_completion_id, span_id, trace_id, response_model): + if not transaction: + return + + for index, message in enumerate(message_list): + 
chat_completion_message_dict = { + "id": str(uuid.uuid4()), + "span_id": span_id, + "trace_id": trace_id, + "transaction_id": transaction._transaction_id, + "content": message.get("content", "")[:MAX_LOG_MESSAGE_LENGTH], + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "model": response_model, + "vendor": "openAI", + "ingest_source": "Python", + } + transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict) + + +def wrap_convert_to_openai_object(wrapped, instance, args, kwargs): + resp = args[0] + returned_response = wrapped(*args, **kwargs) + + if isinstance(resp, openai.openai_response.OpenAIResponse): + setattr(returned_response, "_nr_response_headers", getattr(resp, "_headers", {})) + + return returned_response + + +def instrument_openai_api_resources_chat_completion(module): + if hasattr(module.ChatCompletion, "create"): + wrap_function_wrapper(module, "ChatCompletion.create", wrap_chat_completion_create) + + +def instrument_openai_util(module): + wrap_function_wrapper(module, "convert_to_openai_object", wrap_convert_to_openai_object) diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 900c43a812..e84f6482a4 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -36,8 +36,11 @@ "transaction_tracer.stack_trace_threshold": 0.0, "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, - "ml_insights_event.enabled": True, + "machine_learning.enabled": True, + #"machine_learning.inference_events_value.enabled": True, + "ml_insights_events.enabled": True } + collector_agent_registration = collector_agent_registration_fixture( app_name="Python Agent Test (mlmodel_openai)", default_settings=_default_settings, diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index b428f329f2..599dd9275c 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ 
b/tests/mlmodel_openai/test_chat_completion.py @@ -13,19 +13,182 @@ # limitations under the License. import openai +import pytest +from testing_support.fixtures import ( # function_not_called,; override_application_settings, + function_not_called, + override_application_settings, + reset_core_stats_engine, +) + +from newrelic.api.time_trace import current_trace +from newrelic.api.transaction import current_transaction, add_custom_attribute +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_event_payload import ( + validate_ml_event_payload, +) +from testing_support.validators.validate_ml_events import validate_ml_events +from testing_support.validators.validate_ml_events_outside_transaction import ( + validate_ml_events_outside_transaction, +) + +import newrelic.core.otlp_utils +from newrelic.api.application import application_instance as application +from newrelic.api.background_task import background_task +from newrelic.api.transaction import record_ml_event +from newrelic.core.config import global_settings +from newrelic.packages import six _test_openai_chat_completion_sync_messages = ( {"role": "system", "content": "You are a scientist."}, - {"role": "user", "content": "What is the boiling point of water?"}, - {"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, + #{"role": "user", "content": "What is the boiling point of water?"}, + #{"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, ) -def test_openai_chat_completion_sync(): +def set_trace_info(): + txn = current_transaction() + if txn: + txn._trace_id = "trace-id" + trace = current_trace() + if trace: + trace.guid = "span-id" + + +sync_chat_completion_recorded_events = [ + ( + {'type': 'LlmChatCompletionSummary'}, + { + 'id': None, # UUID that varies with each run + 
'appName': 'Python Agent Test (mlmodel_openai)', + 'conversation_id': 'my-awesome-id', + 'transaction_id': None, + 'span_id': "span-id", + 'trace_id': "trace-id", + 'api_key_last_four_digits': 'sk-CRET', + 'response_time': None, # Response time varies each test run + 'request.model': 'gpt-3.5-turbo', + 'response.model': 'gpt-3.5-turbo-0613', + 'response.organization': 'new-relic-nkmd8b', + 'response.usage.completion_tokens': 11, + 'response.usage.total_tokens': 64, + 'response.usage.prompt_tokens': 53, + 'request.temperature': 0.7, + 'request.max_tokens': 100, + 'response.choices.finish_reason': 'stop', + 'response.api_type': 'None', + 'response.headers.llmVersion': '2020-10-01', + 'response.headers.ratelimitLimitRequests': 200, + 'response.headers.ratelimitLimitTokens': 40000, + 'response.headers.ratelimitResetTokens': "90ms", + 'response.headers.ratelimitResetRequests': "7m12s", + 'response.headers.ratelimitRemainingTokens': 39940, + 'response.headers.ratelimitRemainingRequests': 199, + 'vendor': 'openAI', + 'ingest_source': 'Python', + 'number_of_messages': 3, + 'api_version': '2020-10-01' + }, + ), + ( + {'type': 'LlmChatCompletionMessage'}, + { + 'id': None, + 'span_id': "span-id", + 'trace_id': "trace-id", + 'transaction_id': None, + 'content': 'You are a scientist.', + 'role': 'system', + 'completion_id': None, + 'sequence': 0, + 'model': 'gpt-3.5-turbo-0613', + 'vendor': 'openAI', + 'ingest_source': 'Python' + }, + ), + ( + {'type': 'LlmChatCompletionMessage'}, + { + 'id': None, + 'span_id': "span-id", + 'trace_id': "trace-id", + 'transaction_id': None, + 'content': 'What is 212 degrees Fahrenheit converted to Celsius?', + 'role': 'user', + 'completion_id': None, + 'sequence': 1, + 'model': 'gpt-3.5-turbo-0613', + 'vendor': 'openAI', + 'ingest_source': 'Python' + }, + ), + ( + {'type': 'LlmChatCompletionMessage'}, + { + 'id': None, + 'span_id': "span-id", + 'trace_id': "trace-id", + 'transaction_id': None, + 'content': '212 degrees Fahrenheit is equal to 
100 degrees Celsius.', + 'role': 'assistant', + 'completion_id': None, + 'sequence': 2, + 'model': 'gpt-3.5-turbo-0613', + 'vendor': 'openAI', + 'ingest_source': 'Python' + } + ), +] + + +@reset_core_stats_engine() +@validate_ml_events(sync_chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_ml_event_count(count=4) +@background_task() +def test_openai_chat_completion_sync_in_txn(): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100 + ) + + +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response message from the assistant +@validate_ml_event_count(count=0) +def test_openai_chat_completion_sync_outside_txn(): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100 + ) + + +disabled_ml_settings = { + "machine_learning.enabled": False, + "machine_learning.inference_events_value.enabled": False, + "ml_insights_events.enabled": False +} + + +@override_application_settings(disabled_ml_settings) +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response message from the assistant +@validate_ml_event_count(count=0) +def test_openai_chat_completion_sync_disabled_settings(): + set_trace_info() openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100 ) From d0576631d009e481bd5887a3243aac99b097d823 Mon Sep 17 00:00:00 2001 From: Uma Annamalai Date: Tue, 10 Oct 2023 10:23:00 -0700 Subject: [PATCH 02/20] Remove commented code. 
--- tests/mlmodel_openai/conftest.py | 1 - tests/mlmodel_openai/test_chat_completion.py | 4 ---- 2 files changed, 5 deletions(-) diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index e84f6482a4..bb8fbeb6ee 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -37,7 +37,6 @@ "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, "machine_learning.enabled": True, - #"machine_learning.inference_events_value.enabled": True, "ml_insights_events.enabled": True } diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index 599dd9275c..f32a42bbf7 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ b/tests/mlmodel_openai/test_chat_completion.py @@ -40,8 +40,6 @@ _test_openai_chat_completion_sync_messages = ( {"role": "system", "content": "You are a scientist."}, - #{"role": "user", "content": "What is the boiling point of water?"}, - #{"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, ) @@ -158,7 +156,6 @@ def test_openai_chat_completion_sync_in_txn(): @reset_core_stats_engine() -# One summary event, one system message, one user message, and one response message from the assistant @validate_ml_event_count(count=0) def test_openai_chat_completion_sync_outside_txn(): set_trace_info() @@ -180,7 +177,6 @@ def test_openai_chat_completion_sync_outside_txn(): @override_application_settings(disabled_ml_settings) @reset_core_stats_engine() -# One summary event, one system message, one user message, and one response message from the assistant @validate_ml_event_count(count=0) def test_openai_chat_completion_sync_disabled_settings(): set_trace_info() From 4a681f087e49eb7b1c229e42af414d234d48d49c Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Tue, 17 Oct 2023 14:23:10 -0700 Subject: [PATCH 03/20] Initial 
openai error commit --- newrelic/hooks/mlmodel_openai.py | 101 ++++++++++++++------ tests/mlmodel_openai/test_error.py | 145 +++++++++++++++++++++++++++++ 2 files changed, 220 insertions(+), 26 deletions(-) create mode 100644 tests/mlmodel_openai/test_error.py diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index c7afed04f5..201aac44bd 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -1,4 +1,3 @@ - # Copyright 2010 New Relic, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,15 +11,51 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import openai + import uuid + +import openai + from newrelic.api.function_trace import FunctionTrace -from newrelic.common.object_wrapper import wrap_function_wrapper -from newrelic.api.transaction import current_transaction from newrelic.api.time_trace import get_trace_linking_metadata -from newrelic.core.config import global_settings +from newrelic.api.transaction import current_transaction from newrelic.common.object_names import callable_name from newrelic.core.attribute import MAX_LOG_MESSAGE_LENGTH +from newrelic.core.config import global_settings + + +def openai_error_attributes(exception, request_args): + # 'id' attribute will always be 'None' in this case. + # 'completion_id' is generated at the completion of + # the request and all of these error types occur + # before the response is generated. 
+ # + api_key_LFD = None + if openai.api_key: + api_key_LFD = f"sk-{openai.api_key[-4:]}" + + error_code = "None" + status_code = getattr(exception, "http_status", "None") + if status_code and status_code >= 400 and status_code < 600: + error_code = status_code + + error_attributes = { + "id": "None", + "api_key_last_four_digits": api_key_LFD, + "request.model": request_args.get("model") or request_args.get("engine", "None"), + "temperature": request_args.get("temperature", "None"), + "max_tokens": request_args.get("max_tokens", "None"), + "vendor": "openAI", + "ingest_source": "Python", + "organization": getattr(exception, "organization", "None"), + "number_of_messages": len(request_args.get("messages", [])), + "status_code": status_code, + "error_message": getattr(exception, "_message", "None"), + "error_type": exception.__class__.__name__ or "None", + "error_code": error_code, + "error_param": getattr(exception, "param", "None"), + } + return error_attributes def wrap_chat_completion_create(wrapped, instance, args, kwargs): @@ -31,13 +66,20 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): ft_name = callable_name(wrapped) with FunctionTrace(ft_name) as ft: - response = wrapped(*args, **kwargs) + try: + response = wrapped(*args, **kwargs) + except Exception as exc: + error_attributes = openai_error_attributes(exc, kwargs) + transaction.notice_error(attributes=error_attributes) + raise if not response: return custom_attrs_dict = transaction._custom_params - conversation_id = custom_attrs_dict["conversation_id"] if "conversation_id" in custom_attrs_dict.keys() else str(uuid.uuid4()) + conversation_id = ( + custom_attrs_dict["conversation_id"] if "conversation_id" in custom_attrs_dict.keys() else str(uuid.uuid4()) + ) chat_completion_id = str(uuid.uuid4()) available_metadata = get_trace_linking_metadata() @@ -59,7 +101,7 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): "response_time": ft.duration, "request.model": 
kwargs.get("model") or kwargs.get("engine"), "response.model": response_model, - "response.organization": response.organization, + "response.organization": response.organization, "response.usage.completion_tokens": response.usage.completion_tokens, "response.usage.total_tokens": response.usage.total_tokens, "response.usage.prompt_tokens": response.usage.prompt_tokens, @@ -68,22 +110,36 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): "response.choices.finish_reason": response.choices[0].finish_reason, "response.api_type": response.api_type, "response.headers.llmVersion": response_headers.get("openai-version"), - "response.headers.ratelimitLimitRequests": check_rate_limit_header(response_headers, "x-ratelimit-limit-requests", True), - "response.headers.ratelimitLimitTokens": check_rate_limit_header(response_headers, "x-ratelimit-limit-tokens", True), - "response.headers.ratelimitResetTokens": check_rate_limit_header(response_headers, "x-ratelimit-reset-tokens", False), - "response.headers.ratelimitResetRequests": check_rate_limit_header(response_headers, "x-ratelimit-reset-requests", False), - "response.headers.ratelimitRemainingTokens": check_rate_limit_header(response_headers, "x-ratelimit-remaining-tokens", True), - "response.headers.ratelimitRemainingRequests": check_rate_limit_header(response_headers, "x-ratelimit-remaining-requests", True), + "response.headers.ratelimitLimitRequests": check_rate_limit_header( + response_headers, "x-ratelimit-limit-requests", True + ), + "response.headers.ratelimitLimitTokens": check_rate_limit_header( + response_headers, "x-ratelimit-limit-tokens", True + ), + "response.headers.ratelimitResetTokens": check_rate_limit_header( + response_headers, "x-ratelimit-reset-tokens", False + ), + "response.headers.ratelimitResetRequests": check_rate_limit_header( + response_headers, "x-ratelimit-reset-requests", False + ), + "response.headers.ratelimitRemainingTokens": check_rate_limit_header( + response_headers, 
"x-ratelimit-remaining-tokens", True + ), + "response.headers.ratelimitRemainingRequests": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-requests", True + ), "vendor": "openAI", "ingest_source": "Python", "number_of_messages": len(kwargs.get("messages", [])) + len(response.choices), - "api_version": response_headers.get("openai-version") + "api_version": response_headers.get("openai-version"), } transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict) message_list = list(kwargs.get("messages", [])) + [response.choices[0].message] - create_chat_completion_message_event(transaction, message_list, chat_completion_id, span_id, trace_id, response_model) + create_chat_completion_message_event( + transaction, message_list, chat_completion_id, span_id, trace_id, response_model + ) return response @@ -98,7 +154,9 @@ def check_rate_limit_header(response_headers, header_name, is_int): return None -def create_chat_completion_message_event(transaction, message_list, chat_completion_id, span_id, trace_id, response_model): +def create_chat_completion_message_event( + transaction, message_list, chat_completion_id, span_id, trace_id, response_model +): if not transaction: return @@ -127,12 +185,3 @@ def wrap_convert_to_openai_object(wrapped, instance, args, kwargs): setattr(returned_response, "_nr_response_headers", getattr(resp, "_headers", {})) return returned_response - - -def instrument_openai_api_resources_chat_completion(module): - if hasattr(module.ChatCompletion, "create"): - wrap_function_wrapper(module, "ChatCompletion.create", wrap_chat_completion_create) - - -def instrument_openai_util(module): - wrap_function_wrapper(module, "convert_to_openai_object", wrap_convert_to_openai_object) diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py new file mode 100644 index 0000000000..8e1b11f774 --- /dev/null +++ b/tests/mlmodel_openai/test_error.py @@ -0,0 +1,145 @@ +# Copyright 2010 New Relic, 
Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +import pytest +from testing_support.fixtures import override_application_settings +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) + +from newrelic.api.background_task import background_task +from newrelic.common.object_names import callable_name + +enabled_ml_settings = { + "machine_learning.enabled": True, + "machine_learning.inference_events_value.enabled": True, + "ml_insights_events.enabled": True, +} + +_test_openai_chat_completion_sync_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + + +# No model provided +@override_application_settings(enabled_ml_settings) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "id": "None", + "api_key_last_four_digits": "sk-CRET", + "request.model": "None", + "temperature": 0.7, + "max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "organization": "None", + "number_of_messages": 2, + "status_code": "None", + "error_message": "Must provide an 'engine' or 'model' parameter to create a ", + "error_type": "InvalidRequestError", + "error_code": "None", + "error_param": "engine", + }, + }, +) +@background_task() +def test_invalid_request_error(): + with 
pytest.raises(openai.InvalidRequestError): + openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100, + ) + + +# No api_key provided +@override_application_settings(enabled_ml_settings) +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "id": "None", + "api_key_last_four_digits": "None", + "request.model": "gpt-3.5-turbo", + "temperature": 0.7, + "max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "organization": "None", + "number_of_messages": 2, + "status_code": "None", + "error_message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_pa", + "error_type": "AuthenticationError", + "error_code": "None", + "error_param": "None", + }, + }, +) +@background_task() +def test_authentication_error(monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, temperature=0.7, max_tokens=100 + ) + + +# Incorrect URL provided (404 error) +@override_application_settings(enabled_ml_settings) +@validate_error_trace_attributes( + callable_name(openai.error.APIError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "id": "None", + "api_key_last_four_digits": "sk-CRET", + "request.model": "gpt-3.5-turbo", + "temperature": 0.7, + "max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "organization": "None", + "number_of_messages": 2, + "status_code": 404, + "error_message": 'HTTP code 404 from API (\n\n404 Not Found\n\n

Not Found

\n

The requested URL was not found on this server.

\n
\n
Apache/2.4.25 (Debian) Server at thi', + "error_type": "APIError", + "error_code": 404, + "error_param": "None", + }, + }, +) +@background_task() +def test_api_error(): + openai.api_base = "http://thisisnotarealurl.com/" + with pytest.raises(openai.error.APIError): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100, + ) + + +# Timeout is raised by requests.exceptions.Timeout +# APIConnectionError is raised by requests.exceptions.RequestException From 4de9da63aaa3a1518fa2d96cac358631ce36d257 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Fri, 20 Oct 2023 17:24:54 -0700 Subject: [PATCH 04/20] Add example tests and mock error responses --- .../_mock_external_openai_server.py | 33 ++++++++++- tests/mlmodel_openai/conftest.py | 26 ++++++++- tests/mlmodel_openai/test_error.py | 57 +++++++++++++++++++ 3 files changed, 113 insertions(+), 3 deletions(-) create mode 100644 tests/mlmodel_openai/test_error.py diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 438e4072d8..6bbf5d5003 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -28,6 +28,33 @@ # 3) This app runs on a separate thread meaning it won't block the test app. RESPONSES = { + "Invalid API key.": ( + {"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: invalid. 
You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": "null", + "code": "invalid_api_key", + } + }, + ), + "Model does not exist.": ( + { + "Content-Type": "application/json", + "x-request-id": "cfdf51fb795362ae578c12a21796262c", + }, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": "null", + "code": "model_not_found", + } + }, + ), "This is an embedding test.": ( { "Content-Type": "application/json", @@ -42,6 +69,7 @@ "x-ratelimit-reset-tokens": "2ms", "x-request-id": "c70828b2293314366a76a2b1dcb20688", }, + 200, { "data": [ { @@ -70,6 +98,7 @@ "x-ratelimit-reset-tokens": "90ms", "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", }, + 200, { "choices": [ { @@ -105,7 +134,7 @@ def simple_get(self): headers, response = ({}, "") for k, v in RESPONSES.items(): if prompt.startswith(k): - headers, response = v + headers, status_code, response = v break else: # If no matches found self.send_response(500) @@ -114,7 +143,7 @@ def simple_get(self): return # Send response code - self.send_response(200) + self.send_response(status_code) # Send headers for k, v in headers.items(): diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 0e59b3e970..4513be742d 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -90,6 +90,9 @@ def openai_server(): # Apply function wrappers to record data wrap_function_wrapper("openai.api_requestor", "APIRequestor.request", wrap_openai_api_requestor_request) + wrap_function_wrapper( + "openai.api_requestor", "APIRequestor._interpret_response", wrap_openai_api_requestor_interpret_response + ) yield # Run tests # Write responses to audit log @@ -101,6 +104,23 @@ def openai_server(): RECORDED_HEADERS = set(["x-request-id", "content-type"]) +def wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwargs): + rbody, rcode, 
rheaders = bind_request_interpret_response_params(*args, **kwargs) + headers = dict( + filter( + lambda k: k[0].lower() in RECORDED_HEADERS + or k[0].lower().startswith("openai") + or k[0].lower().startswith("x-ratelimit"), + rheaders.items(), + ) + ) + + if rcode >= 400 or rcode < 200: + rbody = json.loads(rbody) + OPENAI_AUDIT_LOG_CONTENTS["error"] = headers, rcode, rbody # Append response data to audit log + return wrapped(*args, **kwargs) + + def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs): params = bind_request_params(*args, **kwargs) if not params: @@ -124,9 +144,13 @@ def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs): ) # Log response - OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log + OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, result.http_status, data # Append response data to audit log return result def bind_request_params(method, url, params=None, *args, **kwargs): return params + + +def bind_request_interpret_response_params(result, stream): + return result.content.decode("utf-8"), result.status_code, result.headers diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py new file mode 100644 index 0000000000..28a57c45b6 --- /dev/null +++ b/tests/mlmodel_openai/test_error.py @@ -0,0 +1,57 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import pytest + +from newrelic.api.background_task import background_task + +enabled_ml_settings = { + "machine_learning.enabled": True, + "machine_learning.inference_events_value.enabled": True, + "ml_insights_events.enabled": True, +} + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + + +@background_task() +def test_invalid_request_error_model_does_not_exist(): + with pytest.raises(openai.InvalidRequestError): + openai.ChatCompletion.create( + model="does-not-exist", + messages=( + {"role": "system", "content": "Model does not exist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, + ), + temperature=0.7, + max_tokens=100, + ) + + +@background_task() +def test_authentication_error_invalid_api_key(): + with pytest.raises(openai.error.AuthenticationError): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=( + {"role": "system", "content": "Invalid API key."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, + ), + temperature=0.7, + max_tokens=100, + ) From 28549656f543b2ee1bf52e9e6bd60b30f5804ed3 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 23 Oct 2023 13:56:17 -0700 Subject: [PATCH 05/20] Changes to attribute collection --- newrelic/api/time_trace.py | 53 ++++++++------ newrelic/hooks/mlmodel_openai.py | 47 ++++++------- tests/mlmodel_openai/test_error.py | 107 ++++++++++++++--------------- tox.ini | 1 + 4 files changed, 107 insertions(+), 101 deletions(-) diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 24be0e00f6..2c982aa016 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -29,7 +29,6 @@ ) from newrelic.core.config import is_expected_error, should_ignore_error from newrelic.core.trace_cache import trace_cache - from newrelic.packages import six 
_logger = logging.getLogger(__name__) @@ -220,7 +219,7 @@ def add_code_level_metrics(self, source): % (source, exc) ) - def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_code=None): + def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_code=None, message=None): # Bail out if the transaction is not active or # collection of errors not enabled. @@ -262,10 +261,14 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c # Check to see if we need to strip the message before recording it. - if settings.strip_exception_messages.enabled and fullname not in settings.strip_exception_messages.allowlist: - message = STRIP_EXCEPTION_MESSAGE - else: - message = message_raw + if not message: + if ( + settings.strip_exception_messages.enabled + and fullname not in settings.strip_exception_messages.allowlist + ): + message = STRIP_EXCEPTION_MESSAGE + else: + message = message_raw # Where expected or ignore are a callable they should return a # tri-state variable with the following behavior. @@ -359,7 +362,7 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c return fullname, message, message_raw, tb, is_expected - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): + def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None, message=None): attributes = attributes if attributes is not None else {} # If no exception details provided, use current exception. 
@@ -379,6 +382,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, ignore=ignore, expected=expected, status_code=status_code, + message=message, ) if recorded: fullname, message, message_raw, tb, is_expected = recorded @@ -422,23 +426,32 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, input_attributes = {} input_attributes.update(transaction._custom_params) input_attributes.update(attributes) - error_group_name_raw = settings.error_collector.error_group_callback(value, { - "traceback": tb, - "error.class": exc, - "error.message": message_raw, - "error.expected": is_expected, - "custom_params": input_attributes, - "transactionName": getattr(transaction, "name", None), - "response.status": getattr(transaction, "_response_code", None), - "request.method": getattr(transaction, "_request_method", None), - "request.uri": getattr(transaction, "_request_uri", None), - }) + error_group_name_raw = settings.error_collector.error_group_callback( + value, + { + "traceback": tb, + "error.class": exc, + "error.message": message_raw, + "error.expected": is_expected, + "custom_params": input_attributes, + "transactionName": getattr(transaction, "name", None), + "response.status": getattr(transaction, "_response_code", None), + "request.method": getattr(transaction, "_request_method", None), + "request.uri": getattr(transaction, "_request_uri", None), + }, + ) if error_group_name_raw: _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, six.string_types): - raise ValueError("Invalid attribute value for error.group.name. Expected string, got: %s" % repr(error_group_name_raw)) + raise ValueError( + "Invalid attribute value for error.group.name. 
Expected string, got: %s" + % repr(error_group_name_raw) + ) except Exception: - _logger.error("Encountered error when calling error group callback:\n%s", "".join(traceback.format_exception(*sys.exc_info()))) + _logger.error( + "Encountered error when calling error group callback:\n%s", + "".join(traceback.format_exception(*sys.exc_info())), + ) error_group_name = None transaction._create_error_node( diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 2b97785c69..d7b6ba239a 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -28,35 +28,22 @@ def openai_error_attributes(exception, request_args): - # 'id' attribute will always be 'None' in this case. - # 'completion_id' is generated at the completion of - # the request and all of these error types occur - # before the response is generated. - # - api_key_LFD = None - if openai.api_key: - api_key_LFD = f"sk-{openai.api_key[-4:]}" - - error_code = "None" - status_code = getattr(exception, "http_status", "None") - if status_code and status_code >= 400 and status_code < 600: - error_code = status_code + api_key = getattr(openai, "api_key", None) + api_key_LFD = f"sk-{api_key[-4:]}" if api_key else "" error_attributes = { - "id": "None", "api_key_last_four_digits": api_key_LFD, - "request.model": request_args.get("model") or request_args.get("engine", "None"), - "temperature": request_args.get("temperature", "None"), - "max_tokens": request_args.get("max_tokens", "None"), + "request.model": request_args.get("model") or request_args.get("engine") or "", + "request.temperature": request_args.get("temperature", ""), + "request.max_tokens": request_args.get("max_tokens", ""), "vendor": "openAI", "ingest_source": "Python", - "organization": getattr(exception, "organization", "None"), - "number_of_messages": len(request_args.get("messages", [])), - "status_code": status_code, - "error_message": getattr(exception, "_message", "None"), - "error_type": 
exception.__class__.__name__ or "None", - "error_code": error_code, - "error_param": getattr(exception, "param", "None"), + "response.organization": getattr(exception, "organization", ""), + "response.number_of_messages": len(request_args.get("messages", [])), + "status_code": getattr(exception, "http_status", ""), + "error.message": getattr(exception, "_message", ""), + "error.code": getattr(getattr(exception, "error", ""), "code", ""), + "error.param": getattr(exception, "param", ""), } return error_attributes @@ -74,7 +61,11 @@ def wrap_embedding_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - transaction.notice_error(attributes=error_attributes) + ft.notice_error( + status_code=error_attributes.pop("status_code"), + message=error_attributes.pop("error.message"), + attributes=error_attributes, + ) raise if not response: @@ -148,7 +139,11 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - transaction.notice_error(attributes=error_attributes) + ft.notice_error( + status_code=error_attributes.pop("status_code"), + message=error_attributes.pop("error.message"), + attributes=error_attributes, + ) raise if not response: diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py index 8e1b11f774..1e0bbc6ac4 100644 --- a/tests/mlmodel_openai/test_error.py +++ b/tests/mlmodel_openai/test_error.py @@ -18,6 +18,7 @@ from testing_support.validators.validate_error_trace_attributes import ( validate_error_trace_attributes, ) +from testing_support.validators.validate_span_events import validate_span_events from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name @@ -26,6 +27,7 @@ "machine_learning.enabled": True, 
"machine_learning.inference_events_value.enabled": True, "ml_insights_events.enabled": True, + "error_collector.ignore_status_codes": [], } _test_openai_chat_completion_sync_messages = ( @@ -42,25 +44,24 @@ "agent": {}, "intrinsic": {}, "user": { - "id": "None", - "api_key_last_four_digits": "sk-CRET", - "request.model": "None", - "temperature": 0.7, - "max_tokens": 100, + # "api_key_last_four_digits": "sk-CRET", + "api_key_last_four_digits": "sk-rSJc", + "request.temperature": 0.7, + "request.max_tokens": 100, "vendor": "openAI", "ingest_source": "Python", - "organization": "None", - "number_of_messages": 2, - "status_code": "None", - "error_message": "Must provide an 'engine' or 'model' parameter to create a ", - "error_type": "InvalidRequestError", - "error_code": "None", - "error_param": "engine", + "response.number_of_messages": 2, + "error.param": "engine", }, }, ) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) @background_task() -def test_invalid_request_error(): +def test_invalid_request_error_no_model(): with pytest.raises(openai.InvalidRequestError): openai.ChatCompletion.create( # no model provided, @@ -70,76 +71,72 @@ def test_invalid_request_error(): ) -# No api_key provided @override_application_settings(enabled_ml_settings) @validate_error_trace_attributes( - callable_name(openai.error.AuthenticationError), + callable_name(openai.InvalidRequestError), exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "id": "None", - "api_key_last_four_digits": "None", - "request.model": "gpt-3.5-turbo", - "temperature": 0.7, - "max_tokens": 100, + # "api_key_last_four_digits": "sk-CRET", + "api_key_last_four_digits": "sk-rSJc", + "request.model": "I DO NOT EXIST", + "request.temperature": 0.7, + "request.max_tokens": 100, "vendor": "openAI", "ingest_source": "Python", - "organization": "None", - "number_of_messages": 2, - "status_code": "None", - "error_message": "No API key 
provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_pa", - "error_type": "AuthenticationError", - "error_code": "None", - "error_param": "None", + "response.number_of_messages": 2, + "error.code": "model_not_found", }, }, ) +@validate_span_events( + exact_agents={ + "error.message": "The model `I DO NOT EXIST` does not exist", + } +) +@validate_span_events( + exact_agents={ + "http.statusCode": 404, + } +) @background_task() -def test_authentication_error(monkeypatch): - with pytest.raises(openai.error.AuthenticationError): - monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None +def test_invalid_request_error_invalid_model(): + with pytest.raises(openai.InvalidRequestError): openai.ChatCompletion.create( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, temperature=0.7, max_tokens=100 + model="I DO NOT EXIST", + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100, ) -# Incorrect URL provided (404 error) +# No api_key provided @override_application_settings(enabled_ml_settings) @validate_error_trace_attributes( - callable_name(openai.error.APIError), + callable_name(openai.error.AuthenticationError), exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "id": "None", - "api_key_last_four_digits": "sk-CRET", "request.model": "gpt-3.5-turbo", - "temperature": 0.7, - "max_tokens": 100, + "request.temperature": 0.7, + "request.max_tokens": 100, "vendor": "openAI", "ingest_source": "Python", - "organization": "None", - "number_of_messages": 2, - "status_code": 404, - "error_message": 'HTTP code 404 from API (\n\n404 Not Found\n\n

Not Found

\n

The requested URL was not found on this server.

\n
\n
Apache/2.4.25 (Debian) Server at thi', - "error_type": "APIError", - "error_code": 404, - "error_param": "None", + "response.number_of_messages": 2, }, }, ) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.", + } +) @background_task() -def test_api_error(): - openai.api_base = "http://thisisnotarealurl.com/" - with pytest.raises(openai.error.APIError): +def test_authentication_error(monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=_test_openai_chat_completion_sync_messages, - temperature=0.7, - max_tokens=100, + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, temperature=0.7, max_tokens=100 ) - - -# Timeout is raised by requests.exceptions.Timeout -# APIConnectionError is raised by requests.exceptions.RequestException diff --git a/tox.ini b/tox.ini index 8431b5ebd6..10e5bbd511 100644 --- a/tox.ini +++ b/tox.ini @@ -345,6 +345,7 @@ deps = framework_tornado-tornadolatest: tornado framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip mlmodel_openai: openai[datalib] + mlmodel_openai: protobuf logger_loguru-logurulatest: loguru logger_loguru-loguru06: loguru<0.7 logger_loguru-loguru05: loguru<0.6 From 683ab9db7011110980df9bf6bf6b830b1a80bae6 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 23 Oct 2023 16:27:27 -0700 Subject: [PATCH 06/20] Change error tests to match mock server --- tests/mlmodel_openai/test_error.py | 56 +++++++++++++++++++++++++----- 1 file changed, 47 
insertions(+), 9 deletions(-) diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py index 1e0bbc6ac4..120d054f20 100644 --- a/tests/mlmodel_openai/test_error.py +++ b/tests/mlmodel_openai/test_error.py @@ -44,8 +44,7 @@ "agent": {}, "intrinsic": {}, "user": { - # "api_key_last_four_digits": "sk-CRET", - "api_key_last_four_digits": "sk-rSJc", + "api_key_last_four_digits": "sk-CRET", "request.temperature": 0.7, "request.max_tokens": 100, "vendor": "openAI", @@ -78,21 +77,20 @@ def test_invalid_request_error_no_model(): "agent": {}, "intrinsic": {}, "user": { - # "api_key_last_four_digits": "sk-CRET", - "api_key_last_four_digits": "sk-rSJc", - "request.model": "I DO NOT EXIST", + "api_key_last_four_digits": "sk-CRET", + "request.model": "does-not-exist", "request.temperature": 0.7, "request.max_tokens": 100, "vendor": "openAI", "ingest_source": "Python", - "response.number_of_messages": 2, + "response.number_of_messages": 1, "error.code": "model_not_found", }, }, ) @validate_span_events( exact_agents={ - "error.message": "The model `I DO NOT EXIST` does not exist", + "error.message": "The model `does-not-exist` does not exist", } ) @validate_span_events( @@ -104,8 +102,8 @@ def test_invalid_request_error_no_model(): def test_invalid_request_error_invalid_model(): with pytest.raises(openai.InvalidRequestError): openai.ChatCompletion.create( - model="I DO NOT EXIST", - messages=_test_openai_chat_completion_sync_messages, + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), temperature=0.7, max_tokens=100, ) @@ -140,3 +138,43 @@ def test_authentication_error(monkeypatch): openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, temperature=0.7, max_tokens=100 ) + + +# Wrong api_key provided +@override_application_settings(enabled_ml_settings) +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + 
"agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-BEEF", + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "response.number_of_messages": 1, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys." + } +) +@validate_span_events( + exact_agents={ + "http.statusCode": 401, + } +) +@background_task() +def test_wrong_api_key_error(monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) From e8ae2d4fc45fc6ab3001cfc33f0708718d713ff0 Mon Sep 17 00:00:00 2001 From: lrafeei Date: Mon, 23 Oct 2023 23:29:14 +0000 Subject: [PATCH 07/20] [Mega-Linter] Apply linters fixes --- newrelic/api/time_trace.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 2c982aa016..f69c8fa84a 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -608,13 +608,11 @@ def update_async_exclusive_time(self, min_child_start_time, exclusive_duration): def process_child(self, node, is_async): self.children.append(node) if is_async: - # record the lowest start time self.min_child_start_time = min(self.min_child_start_time, node.start_time) # if there are no children running, finalize exclusive time if self.child_count == len(self.children): - exclusive_duration = node.end_time - self.min_child_start_time self.update_async_exclusive_time(self.min_child_start_time, exclusive_duration) From 207c3206e1079c77933bcaaa487871c81ddd034b Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 23 Oct 2023 16:39:03 -0700 Subject: 
[PATCH 08/20] Trigger tests From db1196ad36a88aea2a30cc3dbbfadaf863e8beec Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Wed, 25 Oct 2023 14:19:15 -0700 Subject: [PATCH 09/20] Add dt_enabled decorator to error tests --- tests/mlmodel_openai/test_error.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_error.py index 120d054f20..35d8c79024 100644 --- a/tests/mlmodel_openai/test_error.py +++ b/tests/mlmodel_openai/test_error.py @@ -14,7 +14,11 @@ import openai import pytest -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import ( + dt_enabled, + override_application_settings, + reset_core_stats_engine, +) from testing_support.validators.validate_error_trace_attributes import ( validate_error_trace_attributes, ) @@ -27,7 +31,7 @@ "machine_learning.enabled": True, "machine_learning.inference_events_value.enabled": True, "ml_insights_events.enabled": True, - "error_collector.ignore_status_codes": [], + "error_collector.ignore_status_codes": set(), } _test_openai_chat_completion_sync_messages = ( @@ -38,6 +42,8 @@ # No model provided @override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() @validate_error_trace_attributes( callable_name(openai.InvalidRequestError), exact_attrs={ @@ -70,7 +76,10 @@ def test_invalid_request_error_no_model(): ) +# Invalid model provided @override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() @validate_error_trace_attributes( callable_name(openai.InvalidRequestError), exact_attrs={ @@ -111,6 +120,8 @@ def test_invalid_request_error_invalid_model(): # No api_key provided @override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() @validate_error_trace_attributes( callable_name(openai.error.AuthenticationError), exact_attrs={ @@ -136,12 +147,17 @@ def 
test_authentication_error(monkeypatch): with pytest.raises(openai.error.AuthenticationError): monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None openai.ChatCompletion.create( - model="gpt-3.5-turbo", messages=_test_openai_chat_completion_sync_messages, temperature=0.7, max_tokens=100 + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_sync_messages, + temperature=0.7, + max_tokens=100, ) # Wrong api_key provided @override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() @validate_error_trace_attributes( callable_name(openai.error.AuthenticationError), exact_attrs={ From 9444850c840a0d0210d0431368e02249d7d3c337 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Fri, 27 Oct 2023 19:15:40 -0700 Subject: [PATCH 10/20] Add embedded and async error tests --- newrelic/hooks/mlmodel_openai.py | 29 +- .../_mock_external_openai_server.py | 12 + ...error.py => test_chat_completion_error.py} | 171 ++++++++++- tests/mlmodel_openai/test_embeddings_error.py | 289 ++++++++++++++++++ 4 files changed, 489 insertions(+), 12 deletions(-) rename tests/mlmodel_openai/{test_error.py => test_chat_completion_error.py} (51%) create mode 100644 tests/mlmodel_openai/test_embeddings_error.py diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index d7b6ba239a..1e7022e04a 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -29,17 +29,18 @@ def openai_error_attributes(exception, request_args): api_key = getattr(openai, "api_key", None) - api_key_LFD = f"sk-{api_key[-4:]}" if api_key else "" + api_key_last_four_digits = f"sk-{api_key[-4:]}" if api_key else "" + number_of_messages = len(request_args.get("messages", [])) error_attributes = { - "api_key_last_four_digits": api_key_LFD, + "api_key_last_four_digits": api_key_last_four_digits, "request.model": request_args.get("model") or request_args.get("engine") or "", "request.temperature": request_args.get("temperature", 
""), "request.max_tokens": request_args.get("max_tokens", ""), "vendor": "openAI", "ingest_source": "Python", "response.organization": getattr(exception, "organization", ""), - "response.number_of_messages": len(request_args.get("messages", [])), + "response.number_of_messages": number_of_messages if number_of_messages > 0 else "", "status_code": getattr(exception, "http_status", ""), "error.message": getattr(exception, "_message", ""), "error.code": getattr(getattr(exception, "error", ""), "code", ""), @@ -289,7 +290,16 @@ async def wrap_embedding_acreate(wrapped, instance, args, kwargs): ft_name = callable_name(wrapped) with FunctionTrace(ft_name) as ft: - response = await wrapped(*args, **kwargs) + try: + response = await wrapped(*args, **kwargs) + except Exception as exc: + error_attributes = openai_error_attributes(exc, kwargs) + ft.notice_error( + status_code=error_attributes["status_code"], + message=error_attributes.pop("error.message"), + attributes=error_attributes, + ) + raise if not response: return response @@ -362,7 +372,16 @@ async def wrap_chat_completion_acreate(wrapped, instance, args, kwargs): ft_name = callable_name(wrapped) with FunctionTrace(ft_name) as ft: - response = await wrapped(*args, **kwargs) + try: + response = await wrapped(*args, **kwargs) + except Exception as exc: + error_attributes = openai_error_attributes(exc, kwargs) + ft.notice_error( + status_code=error_attributes["status_code"], + message=error_attributes.pop("error.message"), + attributes=error_attributes, + ) + raise if not response: return response diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 6bbf5d5003..3cd2ec9521 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -40,6 +40,18 @@ } }, ), + "Embedded: Invalid API key.": ( + {"Content-Type": "application/json; charset=utf-8", "x-request-id": 
"4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": "null", + "code": "invalid_api_key", + } + }, + ), "Model does not exist.": ( { "Content-Type": "application/json", diff --git a/tests/mlmodel_openai/test_error.py b/tests/mlmodel_openai/test_chat_completion_error.py similarity index 51% rename from tests/mlmodel_openai/test_error.py rename to tests/mlmodel_openai/test_chat_completion_error.py index 35d8c79024..91f7b8e4ed 100644 --- a/tests/mlmodel_openai/test_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -34,12 +34,14 @@ "error_collector.ignore_status_codes": set(), } -_test_openai_chat_completion_sync_messages = ( +_test_openai_chat_completion_messages = ( {"role": "system", "content": "You are a scientist."}, {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, ) +# Sync tests: + # No model provided @override_application_settings(enabled_ml_settings) @dt_enabled @@ -66,11 +68,11 @@ } ) @background_task() -def test_invalid_request_error_no_model(): +def test_chat_completion_invalid_request_error_no_model(): with pytest.raises(openai.InvalidRequestError): openai.ChatCompletion.create( # no model provided, - messages=_test_openai_chat_completion_sync_messages, + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, ) @@ -108,7 +110,7 @@ def test_invalid_request_error_no_model(): } ) @background_task() -def test_invalid_request_error_invalid_model(): +def test_chat_completion_invalid_request_error_invalid_model(): with pytest.raises(openai.InvalidRequestError): openai.ChatCompletion.create( model="does-not-exist", @@ -143,12 +145,12 @@ def test_invalid_request_error_invalid_model(): } ) @background_task() -def test_authentication_error(monkeypatch): +def 
test_chat_completion_authentication_error(monkeypatch): with pytest.raises(openai.error.AuthenticationError): monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None openai.ChatCompletion.create( model="gpt-3.5-turbo", - messages=_test_openai_chat_completion_sync_messages, + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, ) @@ -185,7 +187,7 @@ def test_authentication_error(monkeypatch): } ) @background_task() -def test_wrong_api_key_error(monkeypatch): +def test_chat_completion_wrong_api_key_error(monkeypatch): with pytest.raises(openai.error.AuthenticationError): monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" openai.ChatCompletion.create( @@ -194,3 +196,158 @@ def test_wrong_api_key_error(monkeypatch): temperature=0.7, max_tokens=100, ) + + +# Async tests: + +# No model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "response.number_of_messages": 2, + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async(loop): + with pytest.raises(openai.InvalidRequestError): + loop.run_until_complete( + openai.ChatCompletion.acreate( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + ) + ) + + +# Invalid model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + 
exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "request.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error.code": "model_not_found", + "status_code": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async(loop): + with pytest.raises(openai.InvalidRequestError): + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + ) + + +# No api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.", + } +) +@background_task() +def test_chat_completion_authentication_error_async(loop, monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +# Wrong api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-BEEF", + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "openAI", + "ingest_source": "Python", + "response.number_of_messages": 1, + "status_code": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys." + } +) +@background_task() +def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + ) diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py new file mode 100644 index 0000000000..947d156cca --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -0,0 +1,289 @@ +# Copyright 2010 New Relic, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events + +from newrelic.api.background_task import background_task +from newrelic.common.object_names import callable_name + +enabled_ml_settings = { + "machine_learning.enabled": True, + "machine_learning.inference_events_value.enabled": True, + "ml_insights_events.enabled": True, + "error_collector.ignore_status_codes": set(), +} + + +# Sync tests: + +# No model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "vendor": "openAI", + "ingest_source": "Python", + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@background_task() +def test_embeddings_invalid_request_error_no_model(): + with pytest.raises(openai.InvalidRequestError): + openai.Embedding.create( + input="This is an embedding test with no model.", + # no model provided + ) + + +# 
Invalid model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "request.model": "does-not-exist", + "vendor": "openAI", + "ingest_source": "Python", + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_span_events( + exact_agents={ + "http.statusCode": 404, + } +) +@background_task() +def test_embeddings_invalid_request_error_invalid_model(): + with pytest.raises(openai.InvalidRequestError): + openai.Embedding.create(input="Model does not exist.", model="does-not-exist") + + +# No api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "request.model": "text-embedding-ada-002", + "vendor": "openAI", + "ingest_source": "Python", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.", + } +) +@background_task() +def test_embeddings_authentication_error(monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + openai.Embedding.create(input="Invalid API key.", model="text-embedding-ada-002") + + +# Wrong api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-BEEF", + "request.model": "text-embedding-ada-002", + "vendor": "openAI", + "ingest_source": "Python", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys." + } +) +@validate_span_events( + exact_agents={ + "http.statusCode": 401, + } +) +@background_task() +def test_embeddings_wrong_api_key_error(monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + openai.Embedding.create(input="Embedded: Invalid API key.", model="text-embedding-ada-002") + + +# Async tests: + +# No model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "vendor": "openAI", + "ingest_source": "Python", + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@background_task() +def test_embeddings_invalid_request_error_no_model_async(loop): + with 
pytest.raises(openai.InvalidRequestError): + loop.run_until_complete( + openai.Embedding.acreate( + input="This is an embedding test with no model.", + # No model provided + ) + ) + + +# Invalid model provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-CRET", + "request.model": "does-not-exist", + "vendor": "openAI", + "ingest_source": "Python", + "error.code": "model_not_found", + "status_code": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async(loop): + with pytest.raises(openai.InvalidRequestError): + loop.run_until_complete(openai.Embedding.acreate(input="Model does not exist.", model="does-not-exist")) + + +# No api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "request.model": "text-embedding-ada-002", + "vendor": "openAI", + "ingest_source": "Python", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.", + } +) +@background_task() +def test_embeddings_authentication_error_async(loop, monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + loop.run_until_complete(openai.Embedding.acreate(input="Invalid API key.", model="text-embedding-ada-002")) + + +# Wrong api_key provided +@override_application_settings(enabled_ml_settings) +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "api_key_last_four_digits": "sk-BEEF", + "request.model": "text-embedding-ada-002", + "vendor": "openAI", + "ingest_source": "Python", + "status_code": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys." 
+ } +) +@background_task() +def test_embeddings_wrong_api_key_error_async(loop, monkeypatch): + with pytest.raises(openai.error.AuthenticationError): + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + loop.run_until_complete( + openai.Embedding.acreate(input="Embedded: Invalid API key.", model="text-embedding-ada-002") + ) From d6c131006b94d249db89ae4f58569568d48b8ee1 Mon Sep 17 00:00:00 2001 From: lrafeei Date: Sat, 28 Oct 2023 02:23:58 +0000 Subject: [PATCH 11/20] [Mega-Linter] Apply linters fixes --- tests/mlmodel_openai/test_chat_completion_error.py | 2 ++ tests/mlmodel_openai/test_embeddings_error.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index 91f7b8e4ed..1ea07219b3 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -42,6 +42,7 @@ # Sync tests: + # No model provided @override_application_settings(enabled_ml_settings) @dt_enabled @@ -200,6 +201,7 @@ def test_chat_completion_wrong_api_key_error(monkeypatch): # Async tests: + # No model provided @override_application_settings(enabled_ml_settings) @dt_enabled diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py index 947d156cca..7de738edb7 100644 --- a/tests/mlmodel_openai/test_embeddings_error.py +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -37,6 +37,7 @@ # Sync tests: + # No model provided @override_application_settings(enabled_ml_settings) @dt_enabled @@ -166,6 +167,7 @@ def test_embeddings_wrong_api_key_error(monkeypatch): # Async tests: + # No model provided @override_application_settings(enabled_ml_settings) @dt_enabled From aa6101885f6d7fb7392a3cfb97f49773a349ef09 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Fri, 27 Oct 2023 20:03:23 -0700 Subject: [PATCH 12/20] Trigger tests From 
e825038f08c07a26f3b6b5db64e3da620a68ebfb Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 30 Oct 2023 12:25:21 -0700 Subject: [PATCH 13/20] Add http.statusCode to span before notice_error call --- newrelic/hooks/mlmodel_openai.py | 20 +++++++++++++++---- .../test_chat_completion_error.py | 16 ++++----------- tests/mlmodel_openai/test_embeddings_error.py | 16 ++++----------- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 1e7022e04a..7adb941704 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -62,8 +62,11 @@ def wrap_embedding_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) + status_code = error_attributes.pop("status_code") + # Usually the status_code is only added to an external error trace's attrs. + ft._add_agent_attribute("http.statusCode", status_code) ft.notice_error( - status_code=error_attributes.pop("status_code"), + status_code=status_code, message=error_attributes.pop("error.message"), attributes=error_attributes, ) @@ -140,8 +143,11 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) + status_code = error_attributes.pop("status_code") + # Usually the status_code is only added to an external error trace's attrs. 
+ ft._add_agent_attribute("http.statusCode", status_code) ft.notice_error( - status_code=error_attributes.pop("status_code"), + status_code=status_code, message=error_attributes.pop("error.message"), attributes=error_attributes, ) @@ -294,8 +300,11 @@ async def wrap_embedding_acreate(wrapped, instance, args, kwargs): response = await wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) + status_code = error_attributes.pop("status_code") + # Usually the status_code is only added to an external error trace's attrs. + ft._add_agent_attribute("http.statusCode", status_code) ft.notice_error( - status_code=error_attributes["status_code"], + status_code=status_code, message=error_attributes.pop("error.message"), attributes=error_attributes, ) @@ -376,8 +385,11 @@ async def wrap_chat_completion_acreate(wrapped, instance, args, kwargs): response = await wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) + status_code = error_attributes.pop("status_code") + # Usually the status_code is only added to an external error trace's attrs. 
+ ft._add_agent_attribute("http.statusCode", status_code) ft.notice_error( - status_code=error_attributes["status_code"], + status_code=status_code, message=error_attributes.pop("error.message"), attributes=error_attributes, ) diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index 1ea07219b3..aeb96822b3 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -103,10 +103,6 @@ def test_chat_completion_invalid_request_error_no_model(): @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - } -) -@validate_span_events( - exact_agents={ "http.statusCode": 404, } ) @@ -179,11 +175,7 @@ def test_chat_completion_authentication_error(monkeypatch): ) @validate_span_events( exact_agents={ - "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys." - } -) -@validate_span_events( - exact_agents={ + "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", "http.statusCode": 401, } ) @@ -258,13 +250,13 @@ def test_chat_completion_invalid_request_error_no_model_async(loop): "ingest_source": "Python", "response.number_of_messages": 1, "error.code": "model_not_found", - "status_code": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", + "http.statusCode": 404, } ) @background_task() @@ -332,13 +324,13 @@ def test_chat_completion_authentication_error_async(loop, monkeypatch): "vendor": "openAI", "ingest_source": "Python", "response.number_of_messages": 1, - "status_code": 401, }, }, ) @validate_span_events( exact_agents={ - "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys." 
+ "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", + "http.statusCode": 401, } ) @background_task() diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py index 7de738edb7..e32ddcad24 100644 --- a/tests/mlmodel_openai/test_embeddings_error.py +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -90,10 +90,6 @@ def test_embeddings_invalid_request_error_no_model(): @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - } -) -@validate_span_events( - exact_agents={ "http.statusCode": 404, } ) @@ -150,11 +146,7 @@ def test_embeddings_authentication_error(monkeypatch): ) @validate_span_events( exact_agents={ - "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys." - } -) -@validate_span_events( - exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", "http.statusCode": 401, } ) @@ -216,13 +208,13 @@ def test_embeddings_invalid_request_error_no_model_async(loop): "vendor": "openAI", "ingest_source": "Python", "error.code": "model_not_found", - "status_code": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", + "http.statusCode": 404, } ) @background_task() @@ -273,13 +265,13 @@ def test_embeddings_authentication_error_async(loop, monkeypatch): "request.model": "text-embedding-ada-002", "vendor": "openAI", "ingest_source": "Python", - "status_code": 401, }, }, ) @validate_span_events( exact_agents={ - "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys." + "error.message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + "http.statusCode": 401, } ) @background_task() From 486d2db1c04de76672a2be07c0ff20b69056d337 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 30 Oct 2023 13:29:13 -0700 Subject: [PATCH 14/20] Report number of messages in error trace even if 0 --- newrelic/hooks/mlmodel_openai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 7adb941704..5197c20bd0 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -40,7 +40,7 @@ def openai_error_attributes(exception, request_args): "vendor": "openAI", "ingest_source": "Python", "response.organization": getattr(exception, "organization", ""), - "response.number_of_messages": number_of_messages if number_of_messages > 0 else "", + "response.number_of_messages": number_of_messages, "status_code": getattr(exception, "http_status", ""), "error.message": getattr(exception, "_message", ""), "error.code": getattr(getattr(exception, "error", ""), "code", ""), From 3872c1e152034f42e1b4c5127b5c5f1002e16fdb Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Mon, 30 Oct 2023 16:25:29 -0700 Subject: [PATCH 15/20] Revert notice_error and add _nr_message attr --- newrelic/api/time_trace.py | 22 ++++++++-------- newrelic/core/stats_engine.py | 5 ++++ newrelic/hooks/mlmodel_openai.py | 26 ++++--------------- .../test_chat_completion_error.py | 8 +++--- tests/mlmodel_openai/test_embeddings_error.py | 9 ++++--- 5 files changed, 30 insertions(+), 40 deletions(-) diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index f69c8fa84a..40ef225129 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -219,7 +219,7 @@ def add_code_level_metrics(self, source): % (source, exc) ) - def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_code=None, message=None): + def 
_observe_exception(self, exc_info=None, ignore=None, expected=None, status_code=None): # Bail out if the transaction is not active or # collection of errors not enabled. @@ -259,16 +259,17 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c module, name, fullnames, message_raw = parse_exc_info((exc, value, tb)) fullname = fullnames[0] + # In case message is in JSON format for OpenAI models + # this will result in a "cleaner" message format + if getattr(value, "_nr_message", None): + message_raw = value._nr_message + # Check to see if we need to strip the message before recording it. - if not message: - if ( - settings.strip_exception_messages.enabled - and fullname not in settings.strip_exception_messages.allowlist - ): - message = STRIP_EXCEPTION_MESSAGE - else: - message = message_raw + if settings.strip_exception_messages.enabled and fullname not in settings.strip_exception_messages.allowlist: + message = STRIP_EXCEPTION_MESSAGE + else: + message = message_raw # Where expected or ignore are a callable they should return a # tri-state variable with the following behavior. @@ -362,7 +363,7 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c return fullname, message, message_raw, tb, is_expected - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None, message=None): + def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): attributes = attributes if attributes is not None else {} # If no exception details provided, use current exception. 
@@ -382,7 +383,6 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, ignore=ignore, expected=expected, status_code=status_code, - message=message, ) if recorded: fullname, message, message_raw, tb, is_expected = recorded diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index ebebe7dbe1..e5c39a2df2 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -724,6 +724,11 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, module, name, fullnames, message_raw = parse_exc_info(error) fullname = fullnames[0] + # In the case of JSON formatting for OpenAI models + # this will result in a "cleaner" message format + if getattr(value, "_nr_message", None): + message_raw = value._nr_message + # Check to see if we need to strip the message before recording it. if settings.strip_exception_messages.enabled and fullname not in settings.strip_exception_messages.allowlist: diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 5197c20bd0..336b35ecc9 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -41,7 +41,7 @@ def openai_error_attributes(exception, request_args): "ingest_source": "Python", "response.organization": getattr(exception, "organization", ""), "response.number_of_messages": number_of_messages, - "status_code": getattr(exception, "http_status", ""), + "http.statusCode": getattr(exception, "http_status", ""), "error.message": getattr(exception, "_message", ""), "error.code": getattr(getattr(exception, "error", ""), "code", ""), "error.param": getattr(exception, "param", ""), @@ -62,12 +62,8 @@ def wrap_embedding_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - status_code = error_attributes.pop("status_code") - # Usually the status_code is only added to an external error trace's 
attrs. - ft._add_agent_attribute("http.statusCode", status_code) + exc._nr_message = error_attributes.pop("error.message") ft.notice_error( - status_code=status_code, - message=error_attributes.pop("error.message"), attributes=error_attributes, ) raise @@ -143,12 +139,8 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - status_code = error_attributes.pop("status_code") - # Usually the status_code is only added to an external error trace's attrs. - ft._add_agent_attribute("http.statusCode", status_code) + exc._nr_message = error_attributes.pop("error.message") ft.notice_error( - status_code=status_code, - message=error_attributes.pop("error.message"), attributes=error_attributes, ) raise @@ -300,12 +292,8 @@ async def wrap_embedding_acreate(wrapped, instance, args, kwargs): response = await wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - status_code = error_attributes.pop("status_code") - # Usually the status_code is only added to an external error trace's attrs. - ft._add_agent_attribute("http.statusCode", status_code) + exc._nr_message = error_attributes.pop("error.message") ft.notice_error( - status_code=status_code, - message=error_attributes.pop("error.message"), attributes=error_attributes, ) raise @@ -385,12 +373,8 @@ async def wrap_chat_completion_acreate(wrapped, instance, args, kwargs): response = await wrapped(*args, **kwargs) except Exception as exc: error_attributes = openai_error_attributes(exc, kwargs) - status_code = error_attributes.pop("status_code") - # Usually the status_code is only added to an external error trace's attrs. 
- ft._add_agent_attribute("http.statusCode", status_code) + exc._nr_message = error_attributes.pop("error.message") ft.notice_error( - status_code=status_code, - message=error_attributes.pop("error.message"), attributes=error_attributes, ) raise diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index aeb96822b3..c271520ca8 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -97,13 +97,13 @@ def test_chat_completion_invalid_request_error_no_model(): "ingest_source": "Python", "response.number_of_messages": 1, "error.code": "model_not_found", + "http.statusCode": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - "http.statusCode": 404, } ) @background_task() @@ -170,13 +170,13 @@ def test_chat_completion_authentication_error(monkeypatch): "vendor": "openAI", "ingest_source": "Python", "response.number_of_messages": 1, + "http.statusCode": 401, }, }, ) @validate_span_events( exact_agents={ "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", - "http.statusCode": 401, } ) @background_task() @@ -250,13 +250,13 @@ def test_chat_completion_invalid_request_error_no_model_async(loop): "ingest_source": "Python", "response.number_of_messages": 1, "error.code": "model_not_found", + "http.statusCode": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - "http.statusCode": 404, } ) @background_task() @@ -324,13 +324,13 @@ def test_chat_completion_authentication_error_async(loop, monkeypatch): "vendor": "openAI", "ingest_source": "Python", "response.number_of_messages": 1, + "http.statusCode": 401, }, }, ) @validate_span_events( exact_agents={ "error.message": "Incorrect API key provided: invalid. 
You can find your API key at https://platform.openai.com/account/api-keys.", - "http.statusCode": 401, } ) @background_task() diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py index e32ddcad24..07b5b243e3 100644 --- a/tests/mlmodel_openai/test_embeddings_error.py +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -84,13 +84,14 @@ def test_embeddings_invalid_request_error_no_model(): "vendor": "openAI", "ingest_source": "Python", "error.code": "model_not_found", + "http.statusCode": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - "http.statusCode": 404, + # "http.statusCode": 404, } ) @background_task() @@ -141,13 +142,13 @@ def test_embeddings_authentication_error(monkeypatch): "request.model": "text-embedding-ada-002", "vendor": "openAI", "ingest_source": "Python", + "http.statusCode": 401, }, }, ) @validate_span_events( exact_agents={ "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", - "http.statusCode": 401, } ) @background_task() @@ -208,13 +209,13 @@ def test_embeddings_invalid_request_error_no_model_async(loop): "vendor": "openAI", "ingest_source": "Python", "error.code": "model_not_found", + "http.statusCode": 404, }, }, ) @validate_span_events( exact_agents={ "error.message": "The model `does-not-exist` does not exist", - "http.statusCode": 404, } ) @background_task() @@ -265,13 +266,13 @@ def test_embeddings_authentication_error_async(loop, monkeypatch): "request.model": "text-embedding-ada-002", "vendor": "openAI", "ingest_source": "Python", + "http.statusCode": 401, }, }, ) @validate_span_events( exact_agents={ "error.message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", - "http.statusCode": 401, } ) @background_task() From f216d9f32f1de430a77a7eb3acf110cbb47d7ad0 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Tue, 31 Oct 2023 09:31:17 -0700 Subject: [PATCH 16/20] Remove enabled_ml_settings as not needed --- .../test_chat_completion_error.py | 21 +----------------- tests/mlmodel_openai/test_embeddings_error.py | 22 +------------------ 2 files changed, 2 insertions(+), 41 deletions(-) diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index c271520ca8..c826b0b324 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -14,11 +14,7 @@ import openai import pytest -from testing_support.fixtures import ( - dt_enabled, - override_application_settings, - reset_core_stats_engine, -) +from testing_support.fixtures import dt_enabled, reset_core_stats_engine from testing_support.validators.validate_error_trace_attributes import ( validate_error_trace_attributes, ) @@ -27,13 +23,6 @@ from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name -enabled_ml_settings = { - "machine_learning.enabled": True, - "machine_learning.inference_events_value.enabled": True, - "ml_insights_events.enabled": True, - "error_collector.ignore_status_codes": set(), -} - _test_openai_chat_completion_messages = ( {"role": "system", "content": "You are a scientist."}, {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, @@ -44,7 +33,6 @@ # No model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -80,7 +68,6 @@ def test_chat_completion_invalid_request_error_no_model(): # Invalid model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() 
@validate_error_trace_attributes( @@ -118,7 +105,6 @@ def test_chat_completion_invalid_request_error_invalid_model(): # No api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -154,7 +140,6 @@ def test_chat_completion_authentication_error(monkeypatch): # Wrong api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -195,7 +180,6 @@ def test_chat_completion_wrong_api_key_error(monkeypatch): # No model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -233,7 +217,6 @@ def test_chat_completion_invalid_request_error_no_model_async(loop): # Invalid model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -273,7 +256,6 @@ def test_chat_completion_invalid_request_error_invalid_model_async(loop): # No api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -308,7 +290,6 @@ def test_chat_completion_authentication_error_async(loop, monkeypatch): # Wrong api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py index 07b5b243e3..35d189ff50 100644 --- a/tests/mlmodel_openai/test_embeddings_error.py +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -14,11 +14,7 @@ import openai import pytest -from testing_support.fixtures import ( - dt_enabled, - override_application_settings, - reset_core_stats_engine, -) +from testing_support.fixtures import dt_enabled, reset_core_stats_engine from testing_support.validators.validate_error_trace_attributes 
import ( validate_error_trace_attributes, ) @@ -27,19 +23,10 @@ from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name -enabled_ml_settings = { - "machine_learning.enabled": True, - "machine_learning.inference_events_value.enabled": True, - "ml_insights_events.enabled": True, - "error_collector.ignore_status_codes": set(), -} - - # Sync tests: # No model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -70,7 +57,6 @@ def test_embeddings_invalid_request_error_no_model(): # Invalid model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -101,7 +87,6 @@ def test_embeddings_invalid_request_error_invalid_model(): # No api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -129,7 +114,6 @@ def test_embeddings_authentication_error(monkeypatch): # Wrong api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -162,7 +146,6 @@ def test_embeddings_wrong_api_key_error(monkeypatch): # No model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -195,7 +178,6 @@ def test_embeddings_invalid_request_error_no_model_async(loop): # Invalid model provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -225,7 +207,6 @@ def test_embeddings_invalid_request_error_invalid_model_async(loop): # No api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( @@ -253,7 +234,6 @@ def test_embeddings_authentication_error_async(loop, 
monkeypatch): # Wrong api_key provided -@override_application_settings(enabled_ml_settings) @dt_enabled @reset_core_stats_engine() @validate_error_trace_attributes( From ea5ff918439fd904c862740ea47f703b6df1d4c3 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Tue, 31 Oct 2023 09:50:04 -0700 Subject: [PATCH 17/20] Add stats engine _nr_message test --- .../agent_features/test_exception_messages.py | 95 +++++++++++++------ 1 file changed, 66 insertions(+), 29 deletions(-) diff --git a/tests/agent_features/test_exception_messages.py b/tests/agent_features/test_exception_messages.py index e9944f9205..e3c653e480 100644 --- a/tests/agent_features/test_exception_messages.py +++ b/tests/agent_features/test_exception_messages.py @@ -13,29 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import pytest +import six +from testing_support.fixtures import ( + reset_core_stats_engine, + set_default_encoding, + validate_application_exception_message, + validate_transaction_exception_message, +) from newrelic.api.application import application_instance as application from newrelic.api.background_task import background_task from newrelic.api.time_trace import notice_error -from testing_support.fixtures import (validate_transaction_exception_message, - set_default_encoding, validate_application_exception_message, - reset_core_stats_engine) - -UNICODE_MESSAGE = u'I💜🐍' -UNICODE_ENGLISH = u'I love python' -BYTES_ENGLISH = b'I love python' -BYTES_UTF8_ENCODED = b'I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d' -INCORRECTLY_DECODED_BYTES_PY2 = u'I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d' -INCORRECTLY_DECODED_BYTES_PY3 = u"b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" +UNICODE_MESSAGE = "I💜🐍" +UNICODE_ENGLISH = "I love python" +BYTES_ENGLISH = b"I love python" +BYTES_UTF8_ENCODED = b"I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d" +INCORRECTLY_DECODED_BYTES_PY2 = "I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d" 
+INCORRECTLY_DECODED_BYTES_PY3 = "b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" # =================== Exception messages during transaction ==================== # ---------------- Python 2 + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_unicode(): @@ -46,8 +49,9 @@ def test_py2_transaction_exception_message_unicode(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() def test_py2_transaction_exception_message_unicode_english(): @@ -58,8 +62,9 @@ def test_py2_transaction_exception_message_unicode_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() def test_py2_transaction_exception_message_bytes_english(): @@ -69,8 +74,9 @@ def test_py2_transaction_exception_message_bytes_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) @background_task() def test_py2_transaction_exception_message_bytes_non_english(): @@ -83,8 +89,9 @@ def test_py2_transaction_exception_message_bytes_non_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) @background_task() def 
test_py2_transaction_exception_message_bytes_implicit_encoding_non_english(): @@ -97,12 +104,13 @@ def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english() # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_unicode_utf8_encoding(): @@ -114,8 +122,9 @@ def test_py2_transaction_exception_message_unicode_utf8_encoding(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): @@ -127,12 +136,14 @@ def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: notice_error() + # ---------------- Python 3 + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() @@ -144,6 +155,7 @@ def test_py3_transaction_exception_message_bytes_non_english_unicode(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() @@ -155,6 +167,7 @@ def test_py3_transaction_exception_message_unicode_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") 
@validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY3) @background_task() @@ -171,13 +184,15 @@ def test_py3_transaction_exception_message_bytes_non_english(): except ValueError: notice_error() + # =================== Exception messages outside transaction ==================== # ---------------- Python 2 + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_unicode(): """Assert unicode message when using non-ascii characters is preserved, @@ -188,9 +203,10 @@ def test_py2_application_exception_message_unicode(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_ENGLISH) def test_py2_application_exception_message_unicode_english(): """Assert unicode message when using ascii compatible characters preserved, @@ -201,9 +217,10 @@ def test_py2_application_exception_message_unicode_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_ENGLISH) def test_py2_application_exception_message_bytes_english(): """Assert byte string of ascii characters decodes sensibly""" @@ -213,9 +230,10 @@ def test_py2_application_exception_message_bytes_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) 
def test_py2_application_exception_message_bytes_non_english(): """Assert known situation where (explicitly) utf-8 encoded byte string gets @@ -228,9 +246,10 @@ def test_py2_application_exception_message_bytes_non_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) def test_py2_application_exception_message_bytes_implicit_encoding_non_english(): """Assert known situation where (implicitly) utf-8 encoded byte string gets @@ -242,14 +261,15 @@ def test_py2_application_exception_message_bytes_implicit_encoding_non_english() # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_unicode_utf8_encoding(): """Assert unicode error message is preserved with sys non-default utf-8 @@ -261,9 +281,10 @@ def test_py2_application_exception_message_unicode_utf8_encoding(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_bytes_utf8_encoding_non_english(): """Assert utf-8 encoded byte produces correct exception message when sys @@ -274,13 +295,15 @@ def test_py2_application_exception_message_bytes_utf8_encoding_non_english(): # Bytes literal with non-ascii compatible characters 
only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: app = application() notice_error(application=app) + # ---------------- Python 3 + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_MESSAGE) @@ -293,6 +316,7 @@ def test_py3_application_exception_message_bytes_non_english_unicode(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_ENGLISH) @@ -305,6 +329,7 @@ def test_py3_application_exception_message_unicode_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY3) @@ -321,3 +346,15 @@ def test_py3_application_exception_message_bytes_non_english(): except ValueError: app = application() notice_error(application=app) + + +@reset_core_stats_engine() +@validate_application_exception_message("My custom message") +def test_nr_message_exception_attr_override(): + """Override the message using the _nr_message attribute.""" + try: + raise ValueError("Original error message") + except ValueError as e: + e._nr_message = "My custom message" + app = application() + notice_error(application=app) From e2ab33cbb215b2b9c27579939cdc52fccb7ec1ac Mon Sep 17 00:00:00 2001 From: hmstepanek Date: Tue, 31 Oct 2023 16:51:38 +0000 Subject: [PATCH 18/20] [Mega-Linter] Apply linters fixes --- tests/agent_features/test_exception_messages.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/agent_features/test_exception_messages.py b/tests/agent_features/test_exception_messages.py index e3c653e480..154b14e33f 100644 --- a/tests/agent_features/test_exception_messages.py +++ b/tests/agent_features/test_exception_messages.py 
@@ -100,7 +100,6 @@ def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english() MESSAGE IS WRONG. We do not expect it to work now, or in the future. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 @@ -132,7 +131,6 @@ def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): encoding is also utf-8. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 @@ -257,7 +255,6 @@ def test_py2_application_exception_message_bytes_implicit_encoding_non_english() MESSAGE IS WRONG. We do not expect it to work now, or in the future. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 @@ -291,7 +288,6 @@ def test_py2_application_exception_message_bytes_utf8_encoding_non_english(): encoding is also utf-8. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 From baa3a89ee83497e9bf3733750e0c133cda6ffd66 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Tue, 31 Oct 2023 09:57:03 -0700 Subject: [PATCH 19/20] Trigger tests From 9beeae3defc894ca0380e48a7dc37bf5153b5a35 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Tue, 31 Oct 2023 11:52:07 -0700 Subject: [PATCH 20/20] Revert black formatting in unicode/byte messages --- .../agent_features/test_exception_messages.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/agent_features/test_exception_messages.py b/tests/agent_features/test_exception_messages.py index 154b14e33f..55ff30cac9 100644 --- a/tests/agent_features/test_exception_messages.py +++ b/tests/agent_features/test_exception_messages.py @@ -26,12 +26,18 @@ from newrelic.api.background_task import background_task from newrelic.api.time_trace import notice_error -UNICODE_MESSAGE = "I💜🐍" -UNICODE_ENGLISH = "I love python" -BYTES_ENGLISH = b"I love python" -BYTES_UTF8_ENCODED = b"I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d" 
-INCORRECTLY_DECODED_BYTES_PY2 = "I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d" -INCORRECTLY_DECODED_BYTES_PY3 = "b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" +# Turn off black formatting for this section of the code. +# While Python 2 has been EOL'd since 2020, New Relic still +# supports it and therefore these messages need to keep this +# specific formatting. +# fmt: off +UNICODE_MESSAGE = u'I💜🐍' +UNICODE_ENGLISH = u'I love python' +BYTES_ENGLISH = b'I love python' +BYTES_UTF8_ENCODED = b'I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d' +INCORRECTLY_DECODED_BYTES_PY2 = u'I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d' +INCORRECTLY_DECODED_BYTES_PY3 = u"b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" +# fmt: on # =================== Exception messages during transaction ==================== # ---------------- Python 2