Skip to content

Commit edb5615

Browse files
authored
feat: Add new parallel_chat() feature (#188)
* feat: Add new parallel_chat() feature for easily submitting concurrent requests * Update changelog * Make tests more robust * Fix examples * Update function reference * Update types * serialized tool calling * refactor logic * A start on proper error handling * Return None if request was never submitted * Add overloads * Undo unnecessary locking * Attach chat to structured result * Docstring fixes * Clean up error handling tests
1 parent ec189ab commit edb5615

File tree

11 files changed

+1571
-2
lines changed

11 files changed

+1571
-2
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1111

1212
### New features
1313

14+
* Added new family of functions (`parallel_chat()`, `parallel_chat_text()`, and `parallel_chat_structured()`) for submitting multiple prompts at once with some basic rate limiting toggles. (#188)
1415
* Added support for systematic evaluation via [Inspect AI](https://inspect.aisi.org.uk/). This includes:
1516
* A new `.export_eval()` method for exporting conversation history as an Inspect eval dataset sample. This supports multi-turn conversations, tool calls, images, PDFs, and structured data.
1617
* A new `.to_solver()` method for translating chat instances into Inspect solvers that can be used with Inspect's evaluation framework.

chatlas/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
from ._content_image import content_image_file, content_image_plot, content_image_url
1717
from ._content_pdf import content_pdf_file, content_pdf_url
1818
from ._interpolate import interpolate, interpolate_file
19+
from ._parallel import parallel_chat, parallel_chat_structured, parallel_chat_text
1920
from ._provider import Provider
2021
from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
2122
from ._provider_cloudflare import ChatCloudflare
@@ -48,6 +49,9 @@
4849
"batch_chat_completed",
4950
"batch_chat_structured",
5051
"batch_chat_text",
52+
"parallel_chat",
53+
"parallel_chat_structured",
54+
"parallel_chat_text",
5155
"ChatAnthropic",
5256
"ChatAuto",
5357
"ChatBedrockAnthropic",

chatlas/_chat.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1259,6 +1259,7 @@ def chat_structured(
12591259
data_model: type[BaseModelT],
12601260
echo: EchoOptions = "none",
12611261
stream: bool = False,
1262+
kwargs: Optional[SubmitInputArgsT] = None,
12621263
) -> BaseModelT:
12631264
"""
12641265
Extract structured data.
@@ -1279,6 +1280,9 @@ def chat_structured(
12791280
- `"none"`: Do not echo any content.
12801281
stream
12811282
Whether to stream the response (i.e., have the response appear in chunks).
1283+
kwargs
1284+
Additional keyword arguments to pass to the method used for requesting
1285+
the response.
12821286
12831287
Returns
12841288
-------
@@ -1290,6 +1294,7 @@ def chat_structured(
12901294
data_model=data_model,
12911295
echo=echo,
12921296
stream=stream,
1297+
kwargs=kwargs,
12931298
)
12941299
return data_model.model_validate(dat)
12951300

@@ -1322,6 +1327,7 @@ def _submit_and_extract_data(
13221327
data_model: type[BaseModel],
13231328
echo: EchoOptions = "none",
13241329
stream: bool = False,
1330+
kwargs: Optional[SubmitInputArgsT] = None,
13251331
) -> dict[str, Any]:
13261332
display = self._markdown_display(echo=echo)
13271333

@@ -1331,6 +1337,7 @@ def _submit_and_extract_data(
13311337
data_model=data_model,
13321338
echo=echo,
13331339
stream=stream,
1340+
kwargs=kwargs,
13341341
)
13351342
)
13361343

@@ -1349,6 +1356,7 @@ async def chat_structured_async(
13491356
data_model: type[BaseModelT],
13501357
echo: EchoOptions = "none",
13511358
stream: bool = False,
1359+
kwargs: Optional[SubmitInputArgsT] = None,
13521360
) -> BaseModelT:
13531361
"""
13541362
Extract structured data from the given input asynchronously.
@@ -1370,6 +1378,9 @@ async def chat_structured_async(
13701378
stream
13711379
Whether to stream the response (i.e., have the response appear in chunks).
13721380
Defaults to `True` if `echo` is not "none".
1381+
kwargs
1382+
Additional keyword arguments to pass to the method used for requesting
1383+
the response.
13731384
13741385
Returns
13751386
-------
@@ -1381,6 +1392,7 @@ async def chat_structured_async(
13811392
data_model=data_model,
13821393
echo=echo,
13831394
stream=stream,
1395+
kwargs=kwargs,
13841396
)
13851397
return data_model.model_validate(dat)
13861398

@@ -1390,6 +1402,7 @@ async def extract_data_async(
13901402
data_model: type[BaseModel],
13911403
echo: EchoOptions = "none",
13921404
stream: bool = False,
1405+
kwargs: Optional[SubmitInputArgsT] = None,
13931406
) -> dict[str, Any]:
13941407
"""
13951408
Deprecated: use `.chat_structured_async()` instead.
@@ -1405,6 +1418,7 @@ async def extract_data_async(
14051418
data_model=data_model,
14061419
echo=echo,
14071420
stream=stream,
1421+
kwargs=kwargs,
14081422
)
14091423

14101424
async def _submit_and_extract_data_async(
@@ -1413,6 +1427,7 @@ async def _submit_and_extract_data_async(
14131427
data_model: type[BaseModel],
14141428
echo: EchoOptions = "none",
14151429
stream: bool = False,
1430+
kwargs: Optional[SubmitInputArgsT] = None,
14161431
) -> dict[str, Any]:
14171432
display = self._markdown_display(echo=echo)
14181433

@@ -1422,6 +1437,7 @@ async def _submit_and_extract_data_async(
14221437
data_model=data_model,
14231438
echo=echo,
14241439
stream=stream,
1440+
kwargs=kwargs,
14251441
)
14261442
)
14271443

0 commit comments

Comments
 (0)