|
13 | 13 | # limitations under the License. |
14 | 14 |
|
15 | 15 | import openai |
| 16 | +import pytest |
| 17 | +from testing_support.fixtures import ( # function_not_called,; override_application_settings, |
| 18 | + function_not_called, |
| 19 | + override_application_settings, |
| 20 | + reset_core_stats_engine, |
| 21 | +) |
| 22 | + |
| 23 | +from newrelic.api.time_trace import current_trace |
| 24 | +from newrelic.api.transaction import current_transaction, add_custom_attribute |
| 25 | +from testing_support.validators.validate_ml_event_count import validate_ml_event_count |
| 26 | +from testing_support.validators.validate_ml_event_payload import ( |
| 27 | + validate_ml_event_payload, |
| 28 | +) |
| 29 | +from testing_support.validators.validate_ml_events import validate_ml_events |
| 30 | +from testing_support.validators.validate_ml_events_outside_transaction import ( |
| 31 | + validate_ml_events_outside_transaction, |
| 32 | +) |
| 33 | + |
| 34 | +import newrelic.core.otlp_utils |
| 35 | +from newrelic.api.application import application_instance as application |
| 36 | +from newrelic.api.background_task import background_task |
| 37 | +from newrelic.api.transaction import record_ml_event |
| 38 | +from newrelic.core.config import global_settings |
| 39 | +from newrelic.packages import six |
16 | 40 |
|
17 | 41 | _test_openai_chat_completion_sync_messages = ( |
18 | 42 | {"role": "system", "content": "You are a scientist."}, |
19 | | - {"role": "user", "content": "What is the boiling point of water?"}, |
20 | | - {"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, |
| 43 | + #{"role": "user", "content": "What is the boiling point of water?"}, |
| 44 | + #{"role": "assistant", "content": "The boiling point of water is 212 degrees Fahrenheit."}, |
21 | 45 | {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, |
22 | 46 | ) |
23 | 47 |
|
24 | 48 |
|
def set_trace_info():
    """Pin the active transaction/trace identifiers to fixed values.

    The recorded-event fixtures below assert the literal strings
    "trace-id" and "span-id", so each test normalizes the IDs up front.
    """
    transaction = current_transaction()
    if transaction:
        transaction._trace_id = "trace-id"
    active_trace = current_trace()
    if active_trace:
        active_trace.guid = "span-id"
| 57 | + |
# Expected ML events for one synchronous chat completion: a summary event
# followed by one message event per prompt/response message, in order.
# Fields set to None (ids, timings) vary per run and are not compared exactly.
sync_chat_completion_recorded_events = [
    (
        {"type": "LlmChatCompletionSummary"},
        {
            "id": None,  # UUID that varies with each run
            "appName": "Python Agent Test (mlmodel_openai)",
            "conversation_id": "my-awesome-id",
            "transaction_id": None,
            "span_id": "span-id",
            "trace_id": "trace-id",
            "api_key_last_four_digits": "sk-CRET",
            "response_time": None,  # Response time varies each test run
            "request.model": "gpt-3.5-turbo",
            "response.model": "gpt-3.5-turbo-0613",
            "response.organization": "new-relic-nkmd8b",
            "response.usage.completion_tokens": 11,
            "response.usage.total_tokens": 64,
            "response.usage.prompt_tokens": 53,
            "request.temperature": 0.7,
            "request.max_tokens": 100,
            "response.choices.finish_reason": "stop",
            "response.api_type": "None",
            "response.headers.llmVersion": "2020-10-01",
            "response.headers.ratelimitLimitRequests": 200,
            "response.headers.ratelimitLimitTokens": 40000,
            "response.headers.ratelimitResetTokens": "90ms",
            "response.headers.ratelimitResetRequests": "7m12s",
            "response.headers.ratelimitRemainingTokens": 39940,
            "response.headers.ratelimitRemainingRequests": 199,
            "vendor": "openAI",
            "ingest_source": "Python",
            "number_of_messages": 3,
            "api_version": "2020-10-01",
        },
    ),
    (
        {"type": "LlmChatCompletionMessage"},
        {
            "id": None,
            "span_id": "span-id",
            "trace_id": "trace-id",
            "transaction_id": None,
            "content": "You are a scientist.",
            "role": "system",
            "completion_id": None,
            "sequence": 0,
            "model": "gpt-3.5-turbo-0613",
            "vendor": "openAI",
            "ingest_source": "Python",
        },
    ),
    (
        {"type": "LlmChatCompletionMessage"},
        {
            "id": None,
            "span_id": "span-id",
            "trace_id": "trace-id",
            "transaction_id": None,
            "content": "What is 212 degrees Fahrenheit converted to Celsius?",
            "role": "user",
            "completion_id": None,
            "sequence": 1,
            "model": "gpt-3.5-turbo-0613",
            "vendor": "openAI",
            "ingest_source": "Python",
        },
    ),
    (
        {"type": "LlmChatCompletionMessage"},
        {
            "id": None,
            "span_id": "span-id",
            "trace_id": "trace-id",
            "transaction_id": None,
            "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.",
            "role": "assistant",
            "completion_id": None,
            "sequence": 2,
            "model": "gpt-3.5-turbo-0613",
            "vendor": "openAI",
            "ingest_source": "Python",
        },
    ),
]
| 142 | + |
| 143 | + |
@reset_core_stats_engine()
@validate_ml_events(sync_chat_completion_recorded_events)
# One summary event, one system message, one user message, and one response message from the assistant
@validate_ml_event_count(count=4)
@background_task()
def test_openai_chat_completion_sync_in_txn():
    """Inside a transaction, a sync chat completion records all four ML events."""
    set_trace_info()
    add_custom_attribute("conversation_id", "my-awesome-id")
    completion_kwargs = {
        "model": "gpt-3.5-turbo",
        "messages": _test_openai_chat_completion_sync_messages,
        "temperature": 0.7,
        "max_tokens": 100,
    }
    openai.ChatCompletion.create(**completion_kwargs)
| 158 | + |
| 159 | + |
@reset_core_stats_engine()
# No active transaction, so no ML events should be recorded at all.
# (count=0 — unlike the in-transaction test above, which records 4 events.)
@validate_ml_event_count(count=0)
def test_openai_chat_completion_sync_outside_txn():
    """Outside a transaction, a sync chat completion records no ML events."""
    set_trace_info()
    add_custom_attribute("conversation_id", "my-awesome-id")
    openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=_test_openai_chat_completion_sync_messages,
        temperature=0.7,
        max_tokens=100
    )
| 172 | + |
| 173 | + |
# Agent settings that switch off every ML/AI event source at once.
disabled_ml_settings = dict.fromkeys(
    (
        "machine_learning.enabled",
        "machine_learning.inference_events_value.enabled",
        "ml_insights_events.enabled",
    ),
    False,
)
| 179 | + |
| 180 | + |
@override_application_settings(disabled_ml_settings)
@reset_core_stats_engine()
# ML monitoring is disabled via settings, so no events should be recorded.
@validate_ml_event_count(count=0)
def test_openai_chat_completion_sync_disabled_settings():
    """With ML settings disabled, a sync chat completion records no ML events."""
    set_trace_info()
    completion_kwargs = {
        "model": "gpt-3.5-turbo",
        "messages": _test_openai_chat_completion_sync_messages,
        "temperature": 0.7,
        "max_tokens": 100,
    }
    openai.ChatCompletion.create(**completion_kwargs)
30 | 193 |
|
31 | 194 |
|
|
0 commit comments