33 changes: 31 additions & 2 deletions tests/mlmodel_openai/_mock_external_openai_server.py
@@ -28,6 +28,33 @@
# 3) This app runs on a separate thread meaning it won't block the test app.

RESPONSES = {
"Invalid API key.": (
{"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"},
401,
{
"error": {
"message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.",
"type": "invalid_request_error",
"param": "null",
"code": "invalid_api_key",
}
},
),
"Model does not exist.": (
{
"Content-Type": "application/json",
"x-request-id": "cfdf51fb795362ae578c12a21796262c",
},
404,
{
"error": {
"message": "The model `does-not-exist` does not exist",
"type": "invalid_request_error",
"param": "null",
"code": "model_not_found",
}
},
),
"This is an embedding test.": (
{
"Content-Type": "application/json",
@@ -42,6 +69,7 @@
"x-ratelimit-reset-tokens": "2ms",
"x-request-id": "c70828b2293314366a76a2b1dcb20688",
},
200,
{
"data": [
{
@@ -70,6 +98,7 @@
"x-ratelimit-reset-tokens": "90ms",
"x-request-id": "49dbbffbd3c3f4612aa48def69059ccd",
},
200,
{
"choices": [
{
@@ -105,7 +134,7 @@ def simple_get(self):
headers, response = ({}, "")
for k, v in RESPONSES.items():
if prompt.startswith(k):
headers, response = v
headers, status_code, response = v
break
else: # If no matches found
self.send_response(500)
@@ -114,7 +143,7 @@ def simple_get(self):
return

# Send response code
self.send_response(200)
self.send_response(status_code)

# Send headers
for k, v in headers.items():
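For readers skimming the diff: each entry in RESPONSES is now a three-element tuple of (headers, status_code, body), and simple_get picks an entry by checking which key the incoming prompt starts with. A minimal, self-contained sketch of that lookup (hypothetical helper name, trimmed response bodies; not part of the PR) might look like this:

# Hypothetical sketch mirroring the prefix match and tuple unpacking in simple_get above.
RESPONSES = {
    "Invalid API key.": ({"Content-Type": "application/json"}, 401, {"error": {"code": "invalid_api_key"}}),
    "Model does not exist.": ({"Content-Type": "application/json"}, 404, {"error": {"code": "model_not_found"}}),
}


def lookup_mock_response(prompt):
    # Return (headers, status_code, body) for the first key the prompt starts with,
    # or None if nothing matches (the real handler answers 500 in that case).
    for key, value in RESPONSES.items():
        if prompt.startswith(key):
            headers, status_code, body = value
            return headers, status_code, body
    return None


assert lookup_mock_response("Model does not exist.")[1] == 404
assert lookup_mock_response("Unknown prompt") is None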
26 changes: 25 additions & 1 deletion tests/mlmodel_openai/conftest.py
@@ -90,6 +90,9 @@ def openai_server():

# Apply function wrappers to record data
wrap_function_wrapper("openai.api_requestor", "APIRequestor.request", wrap_openai_api_requestor_request)
wrap_function_wrapper(
"openai.api_requestor", "APIRequestor._interpret_response", wrap_openai_api_requestor_interpret_response
)
yield # Run tests

# Write responses to audit log
@@ -101,6 +104,23 @@ def openai_server():
RECORDED_HEADERS = set(["x-request-id", "content-type"])


def wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwargs):
rbody, rcode, rheaders = bind_request_interpret_response_params(*args, **kwargs)
headers = dict(
filter(
lambda k: k[0].lower() in RECORDED_HEADERS
or k[0].lower().startswith("openai")
or k[0].lower().startswith("x-ratelimit"),
rheaders.items(),
)
)

if rcode >= 400 or rcode < 200:
rbody = json.loads(rbody)
OPENAI_AUDIT_LOG_CONTENTS["error"] = headers, rcode, rbody # Append response data to audit log
return wrapped(*args, **kwargs)


def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs):
params = bind_request_params(*args, **kwargs)
if not params:
@@ -124,9 +144,13 @@ def wrap_openai_api_requestor_request(wrapped, instance, args, kwargs):
)

# Log response
OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log
OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, result.http_status, data # Append response data to audit log
return result


def bind_request_params(method, url, params=None, *args, **kwargs):
return params


def bind_request_interpret_response_params(result, stream):
return result.content.decode("utf-8"), result.status_code, result.headers
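The new _interpret_response wrapper persists only a subset of response headers before logging error responses. As a quick illustration of that filter in isolation (sample values taken from the mocked responses above; the Server entry is made up for contrast; not part of the PR):

# Hypothetical sketch reproducing the header filter in wrap_openai_api_requestor_interpret_response.
RECORDED_HEADERS = {"x-request-id", "content-type"}

raw_headers = {
    "Content-Type": "application/json; charset=utf-8",
    "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6",
    "x-ratelimit-reset-tokens": "2ms",
    "Server": "nginx",  # dropped: not in the recorded set, not an openai-* or x-ratelimit-* header
}

headers = dict(
    filter(
        lambda kv: kv[0].lower() in RECORDED_HEADERS
        or kv[0].lower().startswith("openai")
        or kv[0].lower().startswith("x-ratelimit"),
        raw_headers.items(),
    )
)

assert "Server" not in headers
assert len(headers) == 3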
58 changes: 58 additions & 0 deletions tests/mlmodel_openai/test_error.py
@@ -0,0 +1,58 @@
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import openai
import pytest

from newrelic.api.background_task import background_task

enabled_ml_settings = {
"machine_learning.enabled": True,
"machine_learning.inference_events_value.enabled": True,
"ml_insights_events.enabled": True,
}

_test_openai_chat_completion_messages = (
{"role": "system", "content": "You are a scientist."},
{"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
)


@background_task()
def test_invalid_request_error_model_does_not_exist():
with pytest.raises(openai.InvalidRequestError):
openai.ChatCompletion.create(
model="does-not-exist",
messages=(
{"role": "system", "content": "Model does not exist."},
{"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
),
temperature=0.7,
max_tokens=100,
)


@background_task()
def test_authentication_error_invalid_api_key(monkeypatch):
monkeypatch.setattr(openai, "api_key", "InvalidKey")
with pytest.raises(openai.error.AuthenticationError):
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=(
{"role": "system", "content": "Invalid API key."},
{"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
),
temperature=0.7,
max_tokens=100,
)
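Note that each test's system-message content ("Model does not exist.", "Invalid API key.") doubles as the lookup key in the mock server's RESPONSES dict, which is how the 401/404 bodies above come back. If a follow-up wanted to assert on the raised error itself rather than only its type, one possible sketch (not part of the PR; the http_status and json_body attribute names assume the openai<1.0 error classes) is:

# Hypothetical follow-up sketch: capture the exception so its details can be
# checked against the mocked 404 response. Leading underscore keeps pytest
# from collecting it as a test on its own.
import openai
import pytest


def _chat_completion_error_details():
    with pytest.raises(openai.InvalidRequestError) as excinfo:
        openai.ChatCompletion.create(
            model="does-not-exist",
            messages=({"role": "system", "content": "Model does not exist."},),
        )
    exc = excinfo.value
    # Expected under the mock server: 404 and the mocked error body.
    return exc.http_status, exc.json_body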