diff --git a/src/Libraries/Microsoft.Extensions.AI.Abstractions/CHANGELOG.md b/src/Libraries/Microsoft.Extensions.AI.Abstractions/CHANGELOG.md
index e43507eb241..5c164e1767d 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Abstractions/CHANGELOG.md
+++ b/src/Libraries/Microsoft.Extensions.AI.Abstractions/CHANGELOG.md
@@ -4,6 +4,8 @@

 - Updated `AIFunctionFactory` to respect `[DisplayName(...)]` on functions as a way to override the function name.
 - Updated `AIFunctionFactory` to respect `[DefaultValue(...)]` on function parameters as a way to specify default values.
+- Added `CodeInterpreterToolCallContent`/`CodeInterpreterToolResultContent` for representing code interpreter tool calls and results.
+- Fixed the serialization/deserialization of variables typed as `UserInputRequestContent`/`UserInputResponseContent`.

 ## 9.10.1

diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/CHANGELOG.md b/src/Libraries/Microsoft.Extensions.AI.OpenAI/CHANGELOG.md
index 55009861611..bf6f8ba99f2 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/CHANGELOG.md
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Release History

+## NOT YET RELEASED
+
+- Updated the `IChatClient` for the OpenAI Responses API to allow either a conversation ID or a response ID in `ChatOptions.ConversationId`.
+- Added an `AITool` to `ResponseTool` conversion utility.
+- Updated to accommodate the additions in `Microsoft.Extensions.AI.Abstractions`.
+
 ## 9.10.1-preview.1.25521.4

 - Updated the `IChatClient` for the OpenAI Responses API to support connectors with `HostedMcpServerTool`.
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/MicrosoftExtensionsAIResponsesExtensions.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/MicrosoftExtensionsAIResponsesExtensions.cs
index ea80ae8e794..6d989c0b56d 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/MicrosoftExtensionsAIResponsesExtensions.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/MicrosoftExtensionsAIResponsesExtensions.cs
@@ -62,7 +62,7 @@ public static IEnumerable<ChatMessage> AsChatMessages(this IEnumerable<ResponseItem> items)
     /// <returns>A converted <see cref="ChatResponse"/>.</returns>
     /// <exception cref="ArgumentNullException"><paramref name="response"/> is <see langword="null"/>.</exception>
     public static ChatResponse AsChatResponse(this OpenAIResponse response, ResponseCreationOptions? options = null) =>
-        OpenAIResponsesChatClient.FromOpenAIResponse(Throw.IfNull(response), options);
+        OpenAIResponsesChatClient.FromOpenAIResponse(Throw.IfNull(response), options, conversationId: null);

     /// <summary>
     /// Creates a sequence of Microsoft.Extensions.AI <see cref="ChatResponseUpdate"/> instances from the specified
     /// sequence of OpenAI <see cref="StreamingResponseUpdate"/> instances.
     /// </summary>
@@ -75,7 +75,7 @@ public static ChatResponse AsChatResponse(this OpenAIResponse response, ResponseCreationOptions? options = null)
     /// <exception cref="ArgumentNullException"><paramref name="responseUpdates"/> is <see langword="null"/>.</exception>
     public static IAsyncEnumerable<ChatResponseUpdate> AsChatResponseUpdatesAsync(
         this IAsyncEnumerable<StreamingResponseUpdate> responseUpdates, ResponseCreationOptions? options = null, CancellationToken cancellationToken = default) =>
-        OpenAIResponsesChatClient.FromOpenAIStreamingResponseUpdatesAsync(Throw.IfNull(responseUpdates), options, cancellationToken: cancellationToken);
+        OpenAIResponsesChatClient.FromOpenAIStreamingResponseUpdatesAsync(Throw.IfNull(responseUpdates), options, conversationId: null, cancellationToken: cancellationToken);

     /// <summary>Creates an OpenAI <see cref="OpenAIResponse"/> from a <see cref="ChatResponse"/>.</summary>
     /// <param name="response">The response to convert.</param>
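To illustrate the `ChatOptions.ConversationId` behavior introduced above, here is a minimal sketch assuming an OpenAI Responses-based `IChatClient`; the model name and the `conv_`/`resp_` values are illustrative placeholders, not values from this change:

    // Sketch: values beginning with "conv_" are sent as the request's "conversation"
    // property; anything else is treated as a previous response ID and sent as
    // "previous_response_id".
    string apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY")!;
    IChatClient client = new OpenAIClient(apiKey)
        .GetOpenAIResponseClient("gpt-4o-mini")
        .AsIChatClient();

    // Continue a server-side conversation by its conversation ID.
    ChatResponse viaConversation = await client.GetResponseAsync(
        "hello",
        new ChatOptions { ConversationId = "conv_12345" });

    // Or chain from a prior response by its response ID.
    ChatResponse viaResponse = await client.GetResponseAsync(
        "hello again",
        new ChatOptions { ConversationId = "resp_12345" });
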
diff --git a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
index 3634a83e2a6..4d4a80be81f 100644
--- a/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.OpenAI/OpenAIResponsesChatClient.cs
@@ -11,11 +11,13 @@
 using System.Runtime.CompilerServices;
 using System.Text;
 using System.Text.Json;
+using System.Text.Json.Nodes;
 using System.Threading;
 using System.Threading.Tasks;
 using Microsoft.Shared.Diagnostics;
 using OpenAI.Responses;

+#pragma warning disable S1226 // Method parameters, caught exceptions and foreach variables' initial values should not be ignored
 #pragma warning disable S3011 // Reflection should not be used to increase accessibility of classes, methods, or fields
 #pragma warning disable S3254 // Default parameter values should not be passed as arguments
 #pragma warning disable SA1204 // Static elements should appear before instance elements
@@ -85,14 +87,14 @@ public async Task<ChatResponse> GetResponseAsync(
         _ = Throw.IfNull(messages);

         // Convert the inputs into what OpenAIResponseClient expects.
-        var openAIOptions = ToOpenAIResponseCreationOptions(options);
+        var openAIOptions = ToOpenAIResponseCreationOptions(options, out string? openAIConversationId);

         // Provided continuation token signals that an existing background response should be fetched.
         if (GetContinuationToken(messages, options) is { } token)
         {
             var response = await _responseClient.GetResponseAsync(token.ResponseId, cancellationToken).ConfigureAwait(false);
-            return FromOpenAIResponse(response, openAIOptions);
+            return FromOpenAIResponse(response, openAIOptions, openAIConversationId);
         }

         var openAIResponseItems = ToOpenAIResponseItems(messages, options);
@@ -104,15 +106,15 @@ public async Task<ChatResponse> GetResponseAsync(
         var openAIResponse = (await task.ConfigureAwait(false)).Value;

         // Convert the response to a ChatResponse.
-        return FromOpenAIResponse(openAIResponse, openAIOptions);
+        return FromOpenAIResponse(openAIResponse, openAIOptions, openAIConversationId);
     }

-    internal static ChatResponse FromOpenAIResponse(OpenAIResponse openAIResponse, ResponseCreationOptions? openAIOptions)
+    internal static ChatResponse FromOpenAIResponse(OpenAIResponse openAIResponse, ResponseCreationOptions? openAIOptions, string? conversationId)
     {
         // Convert and return the results.
         ChatResponse response = new()
         {
-            ConversationId = openAIOptions?.StoredOutputEnabled is false ? null : openAIResponse.Id,
+            ConversationId = openAIOptions?.StoredOutputEnabled is false ? null : (conversationId ?? openAIResponse.Id),
             CreatedAt = openAIResponse.CreatedAt,
             ContinuationToken = CreateContinuationToken(openAIResponse),
             FinishReason = ToFinishReason(openAIResponse.IncompleteStatusDetails?.Reason),
@@ -232,14 +234,14 @@ public IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
     {
         _ = Throw.IfNull(messages);

-        var openAIOptions = ToOpenAIResponseCreationOptions(options);
+        var openAIOptions = ToOpenAIResponseCreationOptions(options, out string? openAIConversationId);

         // Provided continuation token signals that an existing background response should be fetched.
         if (GetContinuationToken(messages, options) is { } token)
         {
             IAsyncEnumerable<StreamingResponseUpdate> updates = _responseClient.GetResponseStreamingAsync(token.ResponseId, token.SequenceNumber, cancellationToken);
-            return FromOpenAIStreamingResponseUpdatesAsync(updates, openAIOptions, token.ResponseId, cancellationToken);
+            return FromOpenAIStreamingResponseUpdatesAsync(updates, openAIOptions, openAIConversationId, token.ResponseId, cancellationToken);
         }

         var openAIResponseItems = ToOpenAIResponseItems(messages, options);
@@ -248,24 +250,26 @@ public IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseAsync(
             _createResponseStreamingAsync(_responseClient, openAIResponseItems, openAIOptions, cancellationToken.ToRequestOptions(streaming: true)) :
             _responseClient.CreateResponseStreamingAsync(openAIResponseItems, openAIOptions, cancellationToken);

-        return FromOpenAIStreamingResponseUpdatesAsync(streamingUpdates, openAIOptions, cancellationToken: cancellationToken);
+        return FromOpenAIStreamingResponseUpdatesAsync(streamingUpdates, openAIOptions, openAIConversationId, cancellationToken: cancellationToken);
     }

     internal static async IAsyncEnumerable<ChatResponseUpdate> FromOpenAIStreamingResponseUpdatesAsync(
         IAsyncEnumerable<StreamingResponseUpdate> streamingResponseUpdates,
         ResponseCreationOptions? options,
+        string? conversationId,
         string? resumeResponseId = null,
         [EnumeratorCancellation] CancellationToken cancellationToken = default)
     {
         DateTimeOffset? createdAt = null;
         string? responseId = resumeResponseId;
-        string? conversationId = options?.StoredOutputEnabled is false ? null : resumeResponseId;
         string? modelId = null;
         string? lastMessageId = null;
         ChatRole? lastRole = null;
         bool anyFunctions = false;
         ResponseStatus? latestResponseStatus = null;

+        UpdateConversationId(resumeResponseId);
+
         await foreach (var streamingUpdate in streamingResponseUpdates.WithCancellation(cancellationToken).ConfigureAwait(false))
         {
             // Create an update populated with the current state of the response.
@@ -290,7 +294,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             case StreamingResponseCreatedUpdate createdUpdate:
                 createdAt = createdUpdate.Response.CreatedAt;
                 responseId = createdUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = createdUpdate.Response.Model;
                 latestResponseStatus = createdUpdate.Response.Status;
                 goto default;

@@ -298,7 +302,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             case StreamingResponseQueuedUpdate queuedUpdate:
                 createdAt = queuedUpdate.Response.CreatedAt;
                 responseId = queuedUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = queuedUpdate.Response.Model;
                 latestResponseStatus = queuedUpdate.Response.Status;
                 goto default;

@@ -306,7 +310,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             case StreamingResponseInProgressUpdate inProgressUpdate:
                 createdAt = inProgressUpdate.Response.CreatedAt;
                 responseId = inProgressUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = inProgressUpdate.Response.Model;
                 latestResponseStatus = inProgressUpdate.Response.Status;
                 goto default;

@@ -314,7 +318,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             case StreamingResponseIncompleteUpdate incompleteUpdate:
                 createdAt = incompleteUpdate.Response.CreatedAt;
                 responseId = incompleteUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = incompleteUpdate.Response.Model;
                 latestResponseStatus = incompleteUpdate.Response.Status;
                 goto default;

@@ -322,7 +326,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             case StreamingResponseFailedUpdate failedUpdate:
                 createdAt = failedUpdate.Response.CreatedAt;
                 responseId = failedUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = failedUpdate.Response.Model;
                 latestResponseStatus = failedUpdate.Response.Status;
                 goto default;

@@ -331,7 +335,7 @@ ChatResponseUpdate CreateUpdate(AIContent? content = null) =>
             {
                 createdAt = completedUpdate.Response.CreatedAt;
                 responseId = completedUpdate.Response.Id;
-                conversationId = options?.StoredOutputEnabled is false ? null : responseId;
+                UpdateConversationId(responseId);
                 modelId = completedUpdate.Response.Model;
                 latestResponseStatus = completedUpdate.Response?.Status;
                 var update = CreateUpdate(ToUsageDetails(completedUpdate.Response) is { } usage ? new UsageContent(usage) : null);
@@ -434,6 +438,18 @@ outputItemDoneUpdate.Item is MessageResponseItem mri &&
                     break;
             }
         }
+
+        void UpdateConversationId(string? id)
+        {
+            if (options?.StoredOutputEnabled is false)
+            {
+                conversationId = null;
+            }
+            else
+            {
+                conversationId ??= id;
+            }
+        }
     }

     ///
@@ -563,25 +579,100 @@ private static ChatRole ToChatRole(MessageRole? role) =>
         null;

     /// <summary>Converts a <see cref="ChatOptions"/> to a <see cref="ResponseCreationOptions"/>.</summary>
-    private ResponseCreationOptions ToOpenAIResponseCreationOptions(ChatOptions? options)
+    private ResponseCreationOptions ToOpenAIResponseCreationOptions(ChatOptions? options, out string? openAIConversationId)
     {
+        openAIConversationId = null;
+
         if (options is null)
         {
             return new ResponseCreationOptions();
         }

-        if (options.RawRepresentationFactory?.Invoke(this) is not ResponseCreationOptions result)
+        bool hasRawRco = false;
+        if (options.RawRepresentationFactory?.Invoke(this) is ResponseCreationOptions result)
+        {
+            hasRawRco = true;
+        }
+        else
         {
             result = new ResponseCreationOptions();
         }

-        // Handle strongly-typed properties.
         result.MaxOutputTokenCount ??= options.MaxOutputTokens;
-        result.PreviousResponseId ??= options.ConversationId;
         result.Temperature ??= options.Temperature;
         result.TopP ??= options.TopP;
         result.BackgroundModeEnabled ??= options.AllowBackgroundResponses;

+        // If ResponseCreationOptions.PreviousResponseId is already set (likely rare), we don't need to do
+        // anything with regard to Conversation: the two are mutually exclusive, and we want to ignore
+        // ChatOptions.ConversationId regardless of its value. If PreviousResponseId is null, we examine the
+        // ResponseCreationOptions instance to see whether a conversation ID has already been set on it and,
+        // if so, use that conversation ID subsequently. If one hasn't been set but ChatOptions.ConversationId
+        // has, we set either ResponseCreationOptions.Conversation (if the string represents a conversation ID)
+        // or else PreviousResponseId.
+        if (result.PreviousResponseId is null)
+        {
+            // Technically, OpenAI's IDs are opaque. However, by convention conversation IDs start with "conv_" and
+            // we can use that to disambiguate whether we're looking at a conversation ID or a response ID.
+            string? chatOptionsConversationId = options.ConversationId;
+            bool chatOptionsHasOpenAIConversationId = chatOptionsConversationId?.StartsWith("conv_", StringComparison.OrdinalIgnoreCase) is true;
+
+            if (hasRawRco || chatOptionsHasOpenAIConversationId)
+            {
+                const string ConversationPropertyName = "conversation";
+                try
+                {
+                    // ResponseCreationOptions currently doesn't publicly expose either Conversation or a JSON path
+                    // for accessing arbitrary properties. Until it does, we need to serialize the RCO, then examine
+                    // and possibly mutate/deserialize the resulting JSON.
+                    var rcoJsonModel = (IJsonModel<ResponseCreationOptions>)result;
+                    var rcoJsonBinaryData = rcoJsonModel.Write(ModelReaderWriterOptions.Json);
+                    if (JsonNode.Parse(rcoJsonBinaryData.ToMemory().Span) is JsonObject rcoJsonObject)
+                    {
+                        // Check if a conversation ID is already set on the RCO. If one is, store it for later.
+                        if (rcoJsonObject.TryGetPropertyValue(ConversationPropertyName, out JsonNode? existingConversationNode))
+                        {
+                            switch (existingConversationNode?.GetValueKind())
+                            {
+                                case JsonValueKind.String:
+                                    openAIConversationId = existingConversationNode.GetValue<string>();
+                                    break;
+
+                                case JsonValueKind.Object:
+                                    openAIConversationId =
+                                        existingConversationNode.AsObject().TryGetPropertyValue("id", out JsonNode? idNode) && idNode?.GetValueKind() == JsonValueKind.String ?
+                                            idNode.GetValue<string>() :
+                                            null;
+                                    break;
+                            }
+                        }
+
+                        // If one isn't set, and ChatOptions.ConversationId is set to a conversation ID, set it now.
+                        if (openAIConversationId is null && chatOptionsHasOpenAIConversationId)
+                        {
+                            rcoJsonObject[ConversationPropertyName] = JsonValue.Create(chatOptionsConversationId);
+                            rcoJsonBinaryData = new(JsonSerializer.SerializeToUtf8Bytes(rcoJsonObject, AIJsonUtilities.DefaultOptions.GetTypeInfo(typeof(JsonNode))));
+                            if (rcoJsonModel.Create(rcoJsonBinaryData, ModelReaderWriterOptions.Json) is ResponseCreationOptions newRco)
+                            {
+                                result = newRco;
+                                openAIConversationId = chatOptionsConversationId;
+                            }
+                        }
+                    }
+                }
+                catch
+                {
+                    // Ignore any JSON formatting / parsing failures.
+                }
+            }
+
+            // If after all that we still don't have a conversation ID, and ChatOptions.ConversationId is set,
+            // treat it as a response ID.
+            if (openAIConversationId is null && options.ConversationId is { } previousResponseId)
+            {
+                result.PreviousResponseId = previousResponseId;
+            }
+        }
+
         if (options.Instructions is { } instructions)
         {
             result.Instructions = string.IsNullOrEmpty(result.Instructions) ?
diff --git a/src/Libraries/Microsoft.Extensions.AI/CHANGELOG.md b/src/Libraries/Microsoft.Extensions.AI/CHANGELOG.md
index 5b099dbd62f..d0de268ecaf 100644
--- a/src/Libraries/Microsoft.Extensions.AI/CHANGELOG.md
+++ b/src/Libraries/Microsoft.Extensions.AI/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Release History

+## NOT YET RELEASED
+
+- Updated the Open Telemetry instrumentation to conform to the latest 1.38 draft specification of the Semantic Conventions for Generative AI systems.
+
 ## 9.10.1

 - Added an `[Experimental]` implementation of a tool reduction component for constraining the set of tools exposed.
@@ -23,7 +27,7 @@
 - Added `FunctionInvokingChatClient` support for non-invocable tools and `TerminateOnUnknownCalls` property.
 - Added support to `FunctionInvokingChatClient` for user approval of function invocations.
-- Updated the Open Telemetry instrumentation to conform to the latest 1.37.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated the Open Telemetry instrumentation to conform to the latest 1.37 draft specification of the Semantic Conventions for Generative AI systems.
 - Fixed `GetResponseAsync` to only look at the contents of the last message in the response.

 ## 9.8.0
@@ -36,20 +40,20 @@
 - Added `FunctionInvokingChatClient.FunctionInvoker` to simplify customizing how functions are invoked.
 - Increased the default `FunctionInvokingChatClient.MaximumIterationsPerRequest` value from 10 to 40.
-- Updated the Open Telemetry instrumentation to conform to the latest 1.36.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated the Open Telemetry instrumentation to conform to the latest 1.36 draft specification of the Semantic Conventions for Generative AI systems.
 - Updated to accommodate the additions in `Microsoft.Extensions.AI.Abstractions`.

 ## 9.7.0

 - Added `DistributedCachingChatClient/EmbeddingGenerator.AdditionalCacheKeyValues` to allow adding additional values to the cache key.
 - Allowed a `CachingChatClient` to control per-request caching.
-- Updated the Open Telemetry instrumentation to conform to the latest 1.35.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated the Open Telemetry instrumentation to conform to the latest 1.35 draft specification of the Semantic Conventions for Generative AI systems.
 - Updated to accommodate the additions in `Microsoft.Extensions.AI.Abstractions`.

 ## 9.6.0

 - Fixed hashing in `CachingChatClient` and `CachingEmbeddingGenerator` to be stable with respect to indentation settings and property ordering.
-- Updated the Open Telemetry instrumentation to conform to the latest 1.34.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated the Open Telemetry instrumentation to conform to the latest 1.34 draft specification of the Semantic Conventions for Generative AI systems.
 - Updated to accommodate the additions in `Microsoft.Extensions.AI.Abstractions`.

 ## 9.5.0
@@ -58,7 +62,7 @@
 - Changed `FunctionInvokingChatClient` to respect the `SynchronizationContext` of the caller when invoking functions.
 - Changed hash function algorithm used in `CachingChatClient` and `CachingEmbeddingGenerator` to SHA-384 instead of SHA-256.
 - Updated `FunctionInvokingChatClient` to include token counts on its emitted diagnostic spans.
-- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.33.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.33 draft specification of the Semantic Conventions for Generative AI systems.
 - Renamed the `useJsonSchema` parameter of `GetResponseAsync`.
 - Removed debug-level logging of updates in `LoggingChatClient`.
 - Avoided caching in `CachingChatClient` when `ConversationId` is set.
@@ -68,7 +72,7 @@

 - Fixed `CachingChatClient` to avoid caching when `ConversationId` is set.
 - Renamed `useJsonSchema` parameter in `GetResponseAsync` to `useJsonSchemaResponseFormat`.
-- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.33.0 draft specification of the Semantic Conventions for Generative AI systems.
+- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.32 draft specification of the Semantic Conventions for Generative AI systems.
## 9.4.3-preview.1.25230.7 @@ -117,12 +121,12 @@ ## 9.3.0-preview.1.25114.11 -- Updated `OpenTelemetryChatClient`/`OpenTelemetryEmbeddingGenerator` to conform to the latest 1.30.0 draft specification of the Semantic Conventions for Generative AI systems. +- Updated `OpenTelemetryChatClient`/`OpenTelemetryEmbeddingGenerator` to conform to the latest 1.30 draft specification of the Semantic Conventions for Generative AI systems. ## 9.1.0-preview.1.25064.3 - Added `FunctionInvokingChatClient.CurrentContext` to give functions access to detailed function invocation information. -- Updated `OpenTelemetryChatClient`/`OpenTelemetryEmbeddingGenerator` to conform to the latest 1.29.0 draft specification of the Semantic Conventions for Generative AI systems. +- Updated `OpenTelemetryChatClient`/`OpenTelemetryEmbeddingGenerator` to conform to the latest 1.29 draft specification of the Semantic Conventions for Generative AI systems. - Updated `FunctionInvokingChatClient` to emit an `Activity`/span around all interactions related to a single chat operation. ## 9.0.1-preview.1.24570.5 @@ -154,7 +158,7 @@ - Improved the readability of JSON generated as part of logging. - Fixed handling of generated JSON schema names when using arrays or generic types. - Improved `CachingChatClient`'s coalescing of streaming updates, including reduced memory allocation and enhanced metadata propagation. -- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.28.0 draft specification of the Semantic Conventions for Generative AI systems. +- Updated `OpenTelemetryChatClient` and `OpenTelemetryEmbeddingGenerator` to conform to the latest 1.28 draft specification of the Semantic Conventions for Generative AI systems. - Improved `CompleteAsync`'s structured output support to handle primitive types, enums, and arrays. ## 9.0.0-preview.9.24507.7 diff --git a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs index aefa069948f..11f73d6db52 100644 --- a/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs +++ b/test/Libraries/Microsoft.Extensions.AI.OpenAI.Tests/OpenAIResponseClientTests.cs @@ -11,6 +11,7 @@ using System.Net.Http; using System.Text; using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading.Tasks; using Microsoft.Extensions.Caching.Distributed; using Microsoft.Extensions.Caching.Memory; @@ -2607,6 +2608,755 @@ public async Task RequestHeaders_UserAgent_ContainsMEAI() Assert.Contains("MEAI", e.Message); } + [Fact] + public async Task ConversationId_AsResponseId_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "previous_response_id":"resp_12345", + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! 
How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "resp_12345", + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("resp_67890", response.ConversationId); + } + + [Fact] + public async Task ConversationId_AsConversationId_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "conversation":"conv_12345", + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "conv_12345", + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("conv_12345", response.ConversationId); + } + + [Fact] + public async Task ConversationId_WhenStoreDisabled_ReturnsNull_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "store":false, + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "store": false, + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! 
How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + StoredOutputEnabled = false + } + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Null(response.ConversationId); + } + + [Fact] + public async Task ConversationId_ChatOptionsOverridesRawRepresentationResponseId_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "previous_response_id":"resp_override", + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "resp_override", + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + PreviousResponseId = null + } + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("resp_67890", response.ConversationId); + } + + [Fact] + public async Task ConversationId_RawRepresentationPreviousResponseIdTakesPrecedence_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "previous_response_id":"resp_fromraw", + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! 
How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "conv_ignored", + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + PreviousResponseId = "resp_fromraw" + } + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("resp_67890", response.ConversationId); + } + + [Fact] + public async Task ConversationId_WhenStoreExplicitlyTrue_UsesResponseId_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "store":true, + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "store": true, + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + StoredOutputEnabled = true + } + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("resp_67890", response.ConversationId); + } + + [Fact] + public async Task ConversationId_WhenStoreExplicitlyTrue_UsesResponseId_Streaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "store":true, + "input":[ + { + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + } + ], + "stream":true, + "max_output_tokens":20 + } + """; + + const string Output = """ + event: response.created + data: {"type":"response.created","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: 
{"type":"response.in_progress","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"in_progress","role":"assistant","content":[]}} + + event: response.content_part.added + data: {"type":"response.content_part.added","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"Hello"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"!"} + + event: response.output_text.done + data: {"type":"response.output_text.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"text":"Hello!"} + + event: response.content_part.done + data: {"type":"response.content_part.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Hello!","annotations":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}} + + event: response.completed + data: {"type":"response.completed","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":{"input_tokens":26,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":36},"user":null,"metadata":{}}} + + + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + List updates = []; + await foreach (var update in client.GetStreamingResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + StoredOutputEnabled = true + } + })) + { + 
updates.Add(update); + } + + Assert.Equal("Hello!", string.Concat(updates.Select(u => u.Text))); + + for (int i = 0; i < updates.Count; i++) + { + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ResponseId); + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ConversationId); + } + } + + [Fact] + public async Task ConversationId_WhenStoreDisabled_ReturnsNull_Streaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "store":false, + "input":[ + { + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + } + ], + "stream":true, + "max_output_tokens":20 + } + """; + + const string Output = """ + event: response.created + data: {"type":"response.created","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":false,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":false,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"in_progress","role":"assistant","content":[]}} + + event: response.content_part.added + data: {"type":"response.content_part.added","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"Hello"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"!"} + + event: response.output_text.done + data: {"type":"response.output_text.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"text":"Hello!"} + + event: response.content_part.done + data: {"type":"response.content_part.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Hello!","annotations":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}} + + event: 
response.completed + data: {"type":"response.completed","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":false,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":{"input_tokens":26,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":36},"user":null,"metadata":{}}} + + + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + List updates = []; + await foreach (var update in client.GetStreamingResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + RawRepresentationFactory = (c) => new ResponseCreationOptions + { + StoredOutputEnabled = false + } + })) + { + updates.Add(update); + } + + Assert.Equal("Hello!", string.Concat(updates.Select(u => u.Text))); + + for (int i = 0; i < updates.Count; i++) + { + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ResponseId); + Assert.Null(updates[i].ConversationId); + } + } + + [Fact] + public async Task ConversationId_AsConversationId_Streaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "conversation":"conv_12345", + "input":[ + { + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + } + ], + "stream":true, + "max_output_tokens":20 + } + """; + + const string Output = """ + event: response.created + data: {"type":"response.created","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"in_progress","role":"assistant","content":[]}} + + event: response.content_part.added + data: 
{"type":"response.content_part.added","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"Hello"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"!"} + + event: response.output_text.done + data: {"type":"response.output_text.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"text":"Hello!"} + + event: response.content_part.done + data: {"type":"response.content_part.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Hello!","annotations":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}} + + event: response.completed + data: {"type":"response.completed","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":{"input_tokens":26,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":36},"user":null,"metadata":{}}} + + + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + List updates = []; + await foreach (var update in client.GetStreamingResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "conv_12345", + })) + { + updates.Add(update); + } + + Assert.Equal("Hello!", string.Concat(updates.Select(u => u.Text))); + + for (int i = 0; i < updates.Count; i++) + { + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ResponseId); + Assert.Equal("conv_12345", updates[i].ConversationId); + } + } + + [Fact] + public async Task ConversationId_AsResponseId_Streaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "previous_response_id":"resp_12345", + "input":[ + { + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + } + ], + "stream":true, + "max_output_tokens":20 + } + """; + + const string Output = """ + event: response.created + data: 
{"type":"response.created","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"in_progress","role":"assistant","content":[]}} + + event: response.content_part.added + data: {"type":"response.content_part.added","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"Hello"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"delta":"!"} + + event: response.output_text.done + data: {"type":"response.output_text.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"text":"Hello!"} + + event: response.content_part.done + data: {"type":"response.content_part.done","item_id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Hello!","annotations":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}} + + event: response.completed + data: 
{"type":"response.completed","response":{"id":"resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77","object":"response","created_at":1741892091,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":20,"model":"gpt-4o-mini-2024-07-18","output":[{"type":"message","id":"msg_67d329fc0c0081919696b8ab36713a41029dabe3ee19bb77","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Hello!","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":0.5,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"usage":{"input_tokens":26,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":36},"user":null,"metadata":{}}} + + + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + List updates = []; + await foreach (var update in client.GetStreamingResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "resp_12345", + })) + { + updates.Add(update); + } + + Assert.Equal("Hello!", string.Concat(updates.Select(u => u.Text))); + + for (int i = 0; i < updates.Count; i++) + { + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ResponseId); + Assert.Equal("resp_67d329fbc87c81919f8952fe71dafc96029dabe3ee19bb77", updates[i].ConversationId); + } + } + + [Fact] + public async Task ConversationId_RawRepresentationConversationIdTakesPrecedence_NonStreaming() + { + const string Input = """ + { + "temperature":0.5, + "model":"gpt-4o-mini", + "conversation":"conv_12345", + "input": [{ + "type":"message", + "role":"user", + "content":[{"type":"input_text","text":"hello"}] + }], + "max_output_tokens":20 + } + """; + + const string Output = """ + { + "id": "resp_67890", + "object": "response", + "created_at": 1741891428, + "status": "completed", + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "type": "message", + "id": "msg_67d32764fcdc8191bcf2e444d4088804058a5e08c46a181d", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "output_text", + "text": "Hello! 
How can I assist you today?", + "annotations": [] + } + ] + } + ] + } + """; + + using VerbatimHttpHandler handler = new(Input, Output); + using HttpClient httpClient = new(handler); + using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini"); + + var rcoJsonModel = (IJsonModel)new ResponseCreationOptions(); + BinaryData rcoJsonBinaryData = rcoJsonModel.Write(ModelReaderWriterOptions.Json); + JsonObject rcoJsonObject = Assert.IsType(JsonNode.Parse(rcoJsonBinaryData.ToMemory().Span)); + Assert.Null(rcoJsonObject["conversation"]); + rcoJsonObject["conversation"] = "conv_12345"; + + var response = await client.GetResponseAsync("hello", new() + { + MaxOutputTokens = 20, + Temperature = 0.5f, + ConversationId = "conv_ignored", + RawRepresentationFactory = (c) => rcoJsonModel.Create( + new BinaryData(JsonSerializer.SerializeToUtf8Bytes(rcoJsonObject)), + ModelReaderWriterOptions.Json) + }); + + Assert.NotNull(response); + Assert.Equal("resp_67890", response.ResponseId); + Assert.Equal("conv_12345", response.ConversationId); + } + private static IChatClient CreateResponseClient(HttpClient httpClient, string modelId) => new OpenAIClient( new ApiKeyCredential("apikey"),
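As context for the disambiguation performed in `ToOpenAIResponseCreationOptions` above, the prefix heuristic can be summarized in isolation. This is a simplified sketch of the convention only; the real implementation also consults any conversation already present on the raw `ResponseCreationOptions`:

    // Simplified sketch: OpenAI IDs are technically opaque, but by convention
    // conversation IDs start with "conv_"; anything else supplied via
    // ChatOptions.ConversationId is treated as a previous response ID.
    static (string? ConversationId, string? PreviousResponseId) Classify(string? id) =>
        id is null ? (null, null) :
        id.StartsWith("conv_", StringComparison.OrdinalIgnoreCase) ? (id, null) :
        (null, id);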