
Commit 54f6bc6

stainless-app[bot] (Stainless Bot) authored and committed
feat(api): OpenAPI spec update via Stainless API (#170)
1 parent 899a45b · commit 54f6bc6

11 files changed: +380 additions, −1 deletion

.stats.yml — 1 addition, 1 deletion

@@ -1,2 +1,2 @@
 configured_endpoints: 21
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-441451c27073e45d1bdc832c5b66c26d90bd185bd94bd461b91257fbf0987ef2.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-2203adbcfccbd119a54a1fe9a29de02b1993184022804cfa4bc32947c8c1eb06.yml

src/prompt_foundry_python_sdk/resources/evaluation_assertions.py — 16 additions, 0 deletions

@@ -52,6 +52,7 @@ def create(
         type: Literal[
             "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
         ],
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -71,6 +72,8 @@ def create(
 
           type: The type of evaluation matcher to use.
 
+          weight: How heavily to weigh the assertion within the evaluation.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -88,6 +91,7 @@ def create(
                     "target_value": target_value,
                     "tool_name": tool_name,
                     "type": type,
+                    "weight": weight,
                 },
                 evaluation_assertion_create_params.EvaluationAssertionCreateParams,
             ),
@@ -108,6 +112,7 @@ def update(
         type: Literal[
             "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
         ],
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -127,6 +132,8 @@ def update(
 
           type: The type of evaluation matcher to use.
 
+          weight: How heavily to weigh the assertion within the evaluation.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -146,6 +153,7 @@ def update(
                     "target_value": target_value,
                     "tool_name": tool_name,
                     "type": type,
+                    "weight": weight,
                 },
                 evaluation_assertion_update_params.EvaluationAssertionUpdateParams,
             ),
@@ -280,6 +288,7 @@ async def create(
         type: Literal[
             "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
         ],
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -299,6 +308,8 @@ async def create(
 
           type: The type of evaluation matcher to use.
 
+          weight: How heavily to weigh the assertion within the evaluation.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -316,6 +327,7 @@ async def create(
                     "target_value": target_value,
                     "tool_name": tool_name,
                     "type": type,
+                    "weight": weight,
                 },
                 evaluation_assertion_create_params.EvaluationAssertionCreateParams,
             ),
@@ -336,6 +348,7 @@ async def update(
         type: Literal[
             "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
         ],
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -355,6 +368,8 @@ async def update(
 
          type: The type of evaluation matcher to use.
 
+          weight: How heavily to weigh the assertion within the evaluation.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -374,6 +389,7 @@ async def update(
                     "target_value": target_value,
                     "tool_name": tool_name,
                     "type": type,
+                    "weight": weight,
                 },
                 evaluation_assertion_update_params.EvaluationAssertionUpdateParams,
             ),
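Taken together, these hunks thread a single optional `weight` parameter through the sync and async `create`/`update` methods. A minimal usage sketch follows — the client construction, API key, IDs, and the weight value are illustrative assumptions, not part of this diff; only the method name and keyword arguments mirror the SDK code above:

from prompt_foundry_python_sdk import PromptFoundry

client = PromptFoundry(api_key="my-api-key")  # placeholder credentials

assertion = client.evaluation_assertions.create(
    evaluation_id="evaluationId",  # placeholder ID of an existing evaluation
    json_path="jsonPath",
    target_value="targetValue",
    tool_name="toolName",
    type="CONTAINS",
    weight=2.0,  # new in this commit; omit it to leave the field unset
)

Because the parameter defaults to NOT_GIVEN, callers that never pass `weight` serialize exactly the same request body as before this commit.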

src/prompt_foundry_python_sdk/resources/evaluations.py — 32 additions, 0 deletions

@@ -43,6 +43,8 @@ def create(
         appended_messages: Iterable[evaluation_create_params.AppendedMessage],
         prompt_id: str,
         variables: Dict[str, Optional[str]],
+        threshold: float | NotGiven = NOT_GIVEN,
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -58,6 +60,10 @@ def create(
 
           variables: The template variables added to the prompt when executing the prompt.
 
+          threshold: What percentage of assertions must pass for the evaluation to pass.
+
+          weight: How heavily to weigh the evaluation within the prompt.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -73,6 +79,8 @@ def create(
                     "appended_messages": appended_messages,
                     "prompt_id": prompt_id,
                     "variables": variables,
+                    "threshold": threshold,
+                    "weight": weight,
                 },
                 evaluation_create_params.EvaluationCreateParams,
             ),
@@ -89,6 +97,8 @@ def update(
         appended_messages: Iterable[evaluation_update_params.AppendedMessage],
         prompt_id: str,
         variables: Dict[str, Optional[str]],
+        threshold: float | NotGiven = NOT_GIVEN,
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -104,6 +114,10 @@ def update(
 
           variables: The template variables added to the prompt when executing the prompt.
 
+          threshold: What percentage of assertions must pass for the evaluation to pass.
+
+          weight: How heavily to weigh the evaluation within the prompt.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -121,6 +135,8 @@ def update(
                     "appended_messages": appended_messages,
                     "prompt_id": prompt_id,
                     "variables": variables,
+                    "threshold": threshold,
+                    "weight": weight,
                 },
                 evaluation_update_params.EvaluationUpdateParams,
             ),
@@ -231,6 +247,8 @@ async def create(
         appended_messages: Iterable[evaluation_create_params.AppendedMessage],
         prompt_id: str,
         variables: Dict[str, Optional[str]],
+        threshold: float | NotGiven = NOT_GIVEN,
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -246,6 +264,10 @@ async def create(
 
           variables: The template variables added to the prompt when executing the prompt.
 
+          threshold: What percentage of assertions must pass for the evaluation to pass.
+
+          weight: How heavily to weigh the evaluation within the prompt.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -261,6 +283,8 @@ async def create(
                     "appended_messages": appended_messages,
                     "prompt_id": prompt_id,
                     "variables": variables,
+                    "threshold": threshold,
+                    "weight": weight,
                 },
                 evaluation_create_params.EvaluationCreateParams,
             ),
@@ -277,6 +301,8 @@ async def update(
         appended_messages: Iterable[evaluation_update_params.AppendedMessage],
         prompt_id: str,
         variables: Dict[str, Optional[str]],
+        threshold: float | NotGiven = NOT_GIVEN,
+        weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -292,6 +318,10 @@ async def update(
 
           variables: The template variables added to the prompt when executing the prompt.
 
+          threshold: What percentage of assertions must pass for the evaluation to pass.
+
+          weight: How heavily to weigh the evaluation within the prompt.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -309,6 +339,8 @@ async def update(
                     "appended_messages": appended_messages,
                     "prompt_id": prompt_id,
                     "variables": variables,
+                    "threshold": threshold,
+                    "weight": weight,
                 },
                 evaluation_update_params.EvaluationUpdateParams,
             ),
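The evaluations resource gets the same treatment with two parameters, `threshold` and `weight`, both defaulting to NOT_GIVEN. A sketch under the same assumptions as above (placeholder IDs and values; the shape of `appended_messages` entries is defined elsewhere in the SDK and is left empty here):

from prompt_foundry_python_sdk import PromptFoundry

client = PromptFoundry(api_key="my-api-key")  # placeholder credentials

evaluation = client.evaluations.create(
    appended_messages=[],                  # message payloads omitted for brevity
    prompt_id="promptId",                  # placeholder ID
    variables={"customerName": "Alice"},   # hypothetical template variable
    threshold=80,   # new: what percentage of assertions must pass
    weight=1.5,     # new: how heavily to weigh the evaluation within the prompt
)

The docstring says "percentage", so the sketch passes 80 rather than 0.8; the diff itself does not pin down the scale, so treat that choice as an assumption.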

src/prompt_foundry_python_sdk/types/evaluation.py — 6 additions, 0 deletions

@@ -99,3 +99,9 @@ class Evaluation(BaseModel):
 
     variables: Dict[str, Optional[str]]
     """The template variables added to the prompt when executing the prompt."""
+
+    threshold: Optional[float] = None
+    """What percentage of assertions must pass for the evaluation to pass."""
+
+    weight: Optional[float] = None
+    """How heavily to weigh the evaluation within the prompt."""

src/prompt_foundry_python_sdk/types/evaluation_assertion.py — 3 additions, 0 deletions

@@ -31,3 +31,6 @@ class EvaluationAssertion(BaseModel):
 
     type: Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"]
    """The type of evaluation matcher to use."""
+
+    weight: Optional[float] = None
+    """How heavily to weigh the assertion within the evaluation."""

src/prompt_foundry_python_sdk/types/evaluation_assertion_create_params.py — 3 additions, 0 deletions

@@ -31,3 +31,6 @@ class EvaluationAssertionCreateParams(TypedDict, total=False):
         Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"]
     ]
     """The type of evaluation matcher to use."""
+
+    weight: float
+    """How heavily to weigh the assertion within the evaluation."""

src/prompt_foundry_python_sdk/types/evaluation_assertion_update_params.py — 3 additions, 0 deletions

@@ -31,3 +31,6 @@ class EvaluationAssertionUpdateParams(TypedDict, total=False):
         Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"]
     ]
     """The type of evaluation matcher to use."""
+
+    weight: float
+    """How heavily to weigh the assertion within the evaluation."""

src/prompt_foundry_python_sdk/types/evaluation_create_params.py — 6 additions, 0 deletions

@@ -29,6 +29,12 @@ class EvaluationCreateParams(TypedDict, total=False):
     variables: Required[Dict[str, Optional[str]]]
     """The template variables added to the prompt when executing the prompt."""
 
+    threshold: float
+    """What percentage of assertions must pass for the evaluation to pass."""
+
+    weight: float
+    """How heavily to weigh the evaluation within the prompt."""
+
 
 class AppendedMessageContentTextContentBlockSchema(TypedDict, total=False):
     text: Required[str]

src/prompt_foundry_python_sdk/types/evaluation_update_params.py — 6 additions, 0 deletions

@@ -29,6 +29,12 @@ class EvaluationUpdateParams(TypedDict, total=False):
     variables: Required[Dict[str, Optional[str]]]
     """The template variables added to the prompt when executing the prompt."""
 
+    threshold: float
+    """What percentage of assertions must pass for the evaluation to pass."""
+
+    weight: float
+    """How heavily to weigh the evaluation within the prompt."""
+
 
 class AppendedMessageContentTextContentBlockSchema(TypedDict, total=False):
     text: Required[str]
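All four params classes are `TypedDict, total=False`, so `threshold` and `weight` are optional keys that type-checkers will not demand; only fields wrapped in `Required[...]` must be present. A typing sketch (the import path follows the usual Stainless layout and is assumed, as are the values):

from prompt_foundry_python_sdk.types import evaluation_create_params

params: evaluation_create_params.EvaluationCreateParams = {
    "appended_messages": [],
    "prompt_id": "promptId",             # placeholder
    "variables": {"customerName": None},
    "threshold": 80.0,                   # optional: may be omitted entirely
    "weight": 1.5,                       # optional: may be omitted entirely
}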

tests/api_resources/test_evaluation_assertions.py — 50 additions, 0 deletions

@@ -32,6 +32,18 @@ def test_method_create(self, client: PromptFoundry) -> None:
         )
         assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
 
+    @parametrize
+    def test_method_create_with_all_params(self, client: PromptFoundry) -> None:
+        evaluation_assertion = client.evaluation_assertions.create(
+            evaluation_id="evaluationId",
+            json_path="jsonPath",
+            target_value="targetValue",
+            tool_name="toolName",
+            type="CONTAINS",
+            weight=0,
+        )
+        assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
+
     @parametrize
     def test_raw_response_create(self, client: PromptFoundry) -> None:
         response = client.evaluation_assertions.with_raw_response.create(
@@ -76,6 +88,19 @@ def test_method_update(self, client: PromptFoundry) -> None:
         )
         assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
 
+    @parametrize
+    def test_method_update_with_all_params(self, client: PromptFoundry) -> None:
+        evaluation_assertion = client.evaluation_assertions.update(
+            id="1212121",
+            evaluation_id="evaluationId",
+            json_path="jsonPath",
+            target_value="targetValue",
+            tool_name="toolName",
+            type="CONTAINS",
+            weight=0,
+        )
+        assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
+
     @parametrize
     def test_raw_response_update(self, client: PromptFoundry) -> None:
         response = client.evaluation_assertions.with_raw_response.update(
@@ -245,6 +270,18 @@ async def test_method_create(self, async_client: AsyncPromptFoundry) -> None:
         )
         assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
 
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncPromptFoundry) -> None:
+        evaluation_assertion = await async_client.evaluation_assertions.create(
+            evaluation_id="evaluationId",
+            json_path="jsonPath",
+            target_value="targetValue",
+            tool_name="toolName",
+            type="CONTAINS",
+            weight=0,
+        )
+        assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
+
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> None:
         response = await async_client.evaluation_assertions.with_raw_response.create(
@@ -289,6 +326,19 @@ async def test_method_update(self, async_client: AsyncPromptFoundry) -> None:
         )
         assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
 
+    @parametrize
+    async def test_method_update_with_all_params(self, async_client: AsyncPromptFoundry) -> None:
+        evaluation_assertion = await async_client.evaluation_assertions.update(
+            id="1212121",
+            evaluation_id="evaluationId",
+            json_path="jsonPath",
+            target_value="targetValue",
+            tool_name="toolName",
+            type="CONTAINS",
+            weight=0,
+        )
+        assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"])
+
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> None:
         response = await async_client.evaluation_assertions.with_raw_response.update(
