diff --git a/Directory.Build.targets b/Directory.Build.targets index ba40ce5..43c43fd 100644 --- a/Directory.Build.targets +++ b/Directory.Build.targets @@ -5,6 +5,7 @@ enable - IDE0130 + + IDE0130;NU5104 - \ No newline at end of file + diff --git a/src/ByteArrayExtensions.cs b/src/ByteArrayExtensions.cs index 92028f3..ad5245b 100644 --- a/src/ByteArrayExtensions.cs +++ b/src/ByteArrayExtensions.cs @@ -1,7 +1,3 @@ -using System; -using System.Collections.Generic; -using System.Linq; - namespace OllamaSharp; /// diff --git a/src/Chat.cs b/src/Chat.cs index 2f3125b..413307f 100644 --- a/src/Chat.cs +++ b/src/Chat.cs @@ -1,9 +1,4 @@ -using System; -using System.Collections.Generic; -using System.Linq; using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; using OllamaSharp.Models; using OllamaSharp.Models.Chat; @@ -61,28 +56,61 @@ public class Chat public RequestOptions? Options { get; set; } /// - /// Creates a new chat instance + /// Initializes a new instance of the class. + /// This basic constructor sets up the chat without a predefined system prompt. /// - /// The Ollama client to use for the chat - /// An optional system prompt to define the behavior of the chat assistant + /// + /// An implementation of the interface, used for managing communication with the chat backend. + /// /// - /// If the client is null, an is thrown. + /// Thrown when the parameter is null. /// /// - /// Setting up a chat with a system prompt: + /// Setting up a chat instance without a system prompt: /// - /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); - /// var prompt = "You are a helpful assistant that will answer any question you are asked."; - /// var chat = new Chat(client, prompt); + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var chat = new Chat(client); + /// + /// // Sending a message to the chat + /// chat.SendMessage("Hello, how are you?"); /// /// - public Chat(IOllamaApiClient client, string systemPrompt = "") + public Chat(IOllamaApiClient client) { Client = client ?? throw new ArgumentNullException(nameof(client)); Model = Client.SelectedModel; + } + + /// + /// Initializes a new instance of the class with a custom system prompt. + /// This constructor allows you to define the assistant's initial behavior or personality using a system prompt. + /// + /// + /// An implementation of the interface, used for managing communication with the chat backend. + /// + /// + /// A string representing the system prompt that defines the behavior and context for the chat assistant. For example, you can set the assistant to be helpful, humorous, or focused on a specific domain. + /// + /// + /// Thrown when the parameter is null. 
+ /// + /// + /// Creating a chat instance with a custom system prompt: + /// + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var systemPrompt = "You are an expert assistant specializing in data science."; + /// var chat = new Chat(client, systemPrompt); + /// + /// // Sending a message to the chat + /// chat.SendMessage("Can you explain neural networks?"); + /// + /// + public Chat(IOllamaApiClient client, string systemPrompt) : this(client) + { + if (string.IsNullOrWhiteSpace(systemPrompt)) + return; - if (!string.IsNullOrEmpty(systemPrompt)) - Messages.Add(new Message(ChatRole.System, systemPrompt)); + Messages.Add(new Message(ChatRole.System, systemPrompt)); } /// @@ -146,7 +174,7 @@ public IAsyncEnumerable SendAsync(string message, IEnumerable SendAsync(string message, IEnumerable? i => SendAsync(message, tools: null, imagesAsBase64: imagesAsBase64, cancellationToken: cancellationToken); /// - /// Sends a message to the currently selected model and streams its response + /// Sends a message to the currently selected model and streams its response. + /// Allows for optional tools, images, or response formatting to customize the interaction. /// - /// The message to send - /// Tools that the model can make use of, see https://ollama.com/blog/tool-support. By using tools, response streaming is automatically turned off - /// Base64 encoded images to send to the model - /// Accepts "json" or an object created with JsonSerializerOptions.Default.GetJsonSchemaAsNode - /// The token to cancel the operation with - public IAsyncEnumerable SendAsync(string message, IEnumerable? tools, IEnumerable? imagesAsBase64 = null, object? format = null, CancellationToken cancellationToken = default) - => SendAsAsync(ChatRole.User, message, tools: tools, imagesAsBase64: imagesAsBase64, format: format, cancellationToken: cancellationToken); + /// + /// The message to send to the chat model as a string. + /// + /// + /// A collection of instances that the model can utilize. + /// Enabling tools automatically disables response streaming. For more information, see the tools documentation: Tool Support. + /// + /// + /// An optional collection of images encoded as Base64 strings to pass into the model. + /// + /// + /// Specifies the response format. Can be set to "json" or an object created with JsonSerializerOptions.Default.GetJsonSchemaAsNode. + /// + /// + /// A to observe while waiting for the operation to complete. + /// + /// + /// An asynchronous enumerable stream of string responses from the model. + /// + /// + /// Example usage of : + /// + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var chat = new Chat(client); + /// var tools = new List { new Tool() }; // Example tools + /// var images = new List { ConvertImageToBase64("path-to-image.jpg") }; + /// await foreach (var response in chat.SendAsync( + /// "Tell me about recent advancements in AI.", + /// tools: tools, + /// imagesAsBase64: images, + /// format: "json", + /// cancellationToken: CancellationToken.None)) + /// { + /// Console.WriteLine(response); + /// } + /// + /// + public IAsyncEnumerable SendAsync(string message, IEnumerable? tools, + IEnumerable? imagesAsBase64 = null, object? 
format = null, + CancellationToken cancellationToken = default) + => SendAsAsync(ChatRole.User, message, tools: tools, imagesAsBase64: imagesAsBase64, format: format, + cancellationToken: cancellationToken); /// - /// Sends a message in a given role to the currently selected model and streams its response + /// Sends a message in a given role to the currently selected model and streams its response. /// - /// The role in which the message should be sent - /// The message to send - /// The token to cancel the operation with - public IAsyncEnumerable SendAsAsync(ChatRole role, string message, CancellationToken cancellationToken = default) + /// + /// The role in which the message should be sent, represented by a . + /// + /// + /// The message to be sent as a string. + /// + /// + /// An optional to observe while waiting for the response. + /// + /// + /// An of strings representing the streamed response from the server. + /// + /// + /// Example usage of the method: + /// + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var chat = new Chat(client); + /// var role = new ChatRole("assistant"); + /// var responseStream = chat.SendAsAsync(role, "How can I assist you today?"); + /// await foreach (var response in responseStream) + /// { + /// Console.WriteLine(response); // Streams and prints the response from the server + /// } + /// + /// + public IAsyncEnumerable SendAsAsync(ChatRole role, string message, + CancellationToken cancellationToken = default) => SendAsAsync(role, message, tools: null, imagesAsBase64: null, cancellationToken: cancellationToken); /// - /// Sends a message in a given role to the currently selected model and streams its response + /// Sends a message in a given role to the currently selected model and streams its response asynchronously. /// - /// The role in which the message should be sent - /// The message to send - /// Images in byte representation to send to the model - /// The token to cancel the operation with - public IAsyncEnumerable SendAsAsync(ChatRole role, string message, IEnumerable>? imagesAsBytes, CancellationToken cancellationToken = default) + /// + /// The role in which the message should be sent. Refer to for supported roles. + /// + /// + /// The message to send to the model. + /// + /// + /// Optional images represented as byte arrays to include in the request. This parameter can be null. + /// + /// + /// A cancellation token to observe while waiting for the response. + /// By default, this parameter is set to . + /// + /// + /// An of strings representing the streamed response generated by the model. + /// + /// + /// Sending a user message with optional images: + /// + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var chat = new Chat(client); + /// var role = new ChatRole("user"); + /// var message = "What's the weather like today?"; + /// var images = new List<IEnumerable<byte>> { File.ReadAllBytes("exampleImage.jpg") }; + /// await foreach (var response in chat.SendAsAsync(role, message, images, CancellationToken.None)) + /// { + /// Console.WriteLine(response); + /// } + /// + /// + public IAsyncEnumerable SendAsAsync(ChatRole role, string message, + IEnumerable>? 
imagesAsBytes, CancellationToken cancellationToken = default) => SendAsAsync(role, message, imagesAsBase64: imagesAsBytes?.ToBase64(), cancellationToken: cancellationToken); /// - /// Sends a message in a given role to the currently selected model and streams its response + /// Sends a message with a specified role to the current model and streams the response as an asynchronous sequence of strings. /// - /// The role in which the message should be sent - /// The message to send - /// Base64 encoded images to send to the model - /// The token to cancel the operation with - public IAsyncEnumerable SendAsAsync(ChatRole role, string message, IEnumerable? imagesAsBase64, CancellationToken cancellationToken = default) - => SendAsAsync(role, message, tools: null, imagesAsBase64: imagesAsBase64, cancellationToken: cancellationToken); + /// + /// The role from which the message originates, such as "User" or "Assistant". + /// + /// + /// The message to send to the model. + /// + /// + /// Optional collection of images, encoded in Base64 format, to include with the message. + /// + /// + /// A token that can be used to cancel the operation. + /// + /// + /// An asynchronous sequence of strings representing the streamed response from the model. + /// + /// + /// + /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest"); + /// var chat = new Chat(client) + /// { + /// Model = "llama3.2-vision:latest" + /// }; + /// // Sending a message as a user role and processing the response + /// await foreach (var response in chat.SendAsAsync(ChatRole.User, "Describe the image", null)) + /// { + /// Console.WriteLine(response); + /// } + /// + /// + public IAsyncEnumerable SendAsAsync(ChatRole role, string message, IEnumerable? imagesAsBase64, + CancellationToken cancellationToken = default) + => SendAsAsync(role, message, tools: null, imagesAsBase64: imagesAsBase64, + cancellationToken: cancellationToken); /// - /// Sends a message in a given role to the currently selected model and streams its response + /// Sends a message as a specified role to the current model and streams back its response as an asynchronous enumerable. /// - /// The role in which the message should be sent - /// The message to send - /// Tools that the model can make use of, see https://ollama.com/blog/tool-support. By using tools, response streaming is automatically turned off - /// Base64 encoded images to send to the model - /// Accepts "json" or an object created with JsonSerializerOptions.Default.GetJsonSchemaAsNode - /// The token to cancel the operation with - public async IAsyncEnumerable SendAsAsync(ChatRole role, string message, IEnumerable? tools, IEnumerable? imagesAsBase64 = null, object? format = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + /// + /// The role in which the message should be sent. This determines the context or perspective of the message. + /// + /// + /// The message that needs to be sent to the chat model. + /// + /// + /// A collection of tools available for the model to utilize. Tools can alter the behavior of the model, such as turning off response streaming automatically when used. + /// + /// + /// An optional collection of images encoded in Base64 format, which are sent along with the message to the model. + /// + /// + /// Defines the response format. Acceptable values include "json" or a schema object created with JsonSerializerOptions.Default.GetJsonSchemaAsNode. + /// + /// + /// A token to cancel the ongoing operation if required. 
+ /// + /// + /// An asynchronous enumerable of response strings streamed from the model. + /// + /// + /// Thrown if the argument is of type by mistake, or if any unsupported types are passed. + /// + /// + /// Using the method to send a message and stream the model's response: + /// + /// var chat = new Chat(client); + /// var role = new ChatRole("assistant"); + /// var tools = new List<Tool>(); + /// var images = new List<string> { "base64EncodedImageData" }; + /// await foreach (var response in chat.SendAsAsync(role, "Generate a summary for the attached image", tools, images)) + /// { + /// Console.WriteLine($"Received response: {response}"); + /// } + /// + /// + public async IAsyncEnumerable SendAsAsync(ChatRole role, string message, IEnumerable? tools, + IEnumerable? imagesAsBase64 = null, object? format = null, + [EnumeratorCancellation] CancellationToken cancellationToken = default) { if (format is CancellationToken) - throw new NotSupportedException($"Argument \"{nameof(format)}\" cannot be of type {nameof(CancellationToken)}. Make sure you use the correct method overload of {nameof(Chat)}{nameof(SendAsync)}() or {nameof(Chat)}{nameof(SendAsAsync)}()."); + throw new NotSupportedException( + $"Argument \"{nameof(format)}\" cannot be of type {nameof(CancellationToken)}. Make sure you use the correct method overload of {nameof(Chat)}{nameof(SendAsync)}() or {nameof(Chat)}{nameof(SendAsAsync)}()."); Messages.Add(new Message(role, message, imagesAsBase64?.ToArray())); @@ -226,17 +401,16 @@ public async IAsyncEnumerable SendAsAsync(ChatRole role, string message, }; var messageBuilder = new MessageBuilder(); - await foreach (var answer in Client.ChatAsync(request, cancellationToken).ConfigureAwait(false)) { - if (answer is not null) - { - messageBuilder.Append(answer); - yield return answer.Message.Content ?? string.Empty; - } + if (answer is null) continue; + + messageBuilder.Append(answer); + + yield return answer.Message.Content ?? string.Empty; } if (messageBuilder.HasValue) Messages.Add(messageBuilder.ToMessage()); } -} +} \ No newline at end of file diff --git a/src/CollectionExtensions.cs b/src/CollectionExtensions.cs new file mode 100644 index 0000000..d4dc90f --- /dev/null +++ b/src/CollectionExtensions.cs @@ -0,0 +1,67 @@ +namespace OllamaSharp; + +/// +/// Provides extension methods for working with collections. +/// +internal static class CollectionExtensions +{ + /// + /// Adds the elements of the specified collection to the end of the list if the collection is not null. + /// + /// The type of elements in the list and collection. + /// The list to which the elements should be added. + /// + /// The collection whose elements should be added to the list. + /// If null, no operations are performed. + /// + /// + /// Example usage: + /// + /// List<int> myList = new List<int> { 1, 2, 3 }; + /// IEnumerable<int>? additionalItems = new List<int> { 4, 5, 6 }; + /// myList.AddRangeIfNotNull(additionalItems); + /// // myList now contains { 1, 2, 3, 4, 5, 6 } + /// IEnumerable<int>? nullItems = null; + /// myList.AddRangeIfNotNull(nullItems); + /// // myList remains unchanged { 1, 2, 3, 4, 5, 6 } + /// + /// + public static void AddRangeIfNotNull(this List list, IEnumerable? items) + { + if (items is not null) + list.AddRange(items); + } + + /// + /// Executes the specified action for each item in the provided collection. + /// + /// The type of the elements in the collection. + /// + /// The enumerable collection whose elements the action will be performed upon. 
+ /// + /// + /// An Action&lt;T&gt; delegate to perform on each element of the collection. + /// + /// + /// Example usage: + /// + /// List&lt;string&gt; fruits = new List&lt;string&gt; { "apple", "banana", "cherry" }; + /// fruits.ForEachItem(fruit =&gt; Console.WriteLine(fruit)); + /// // Output: + /// // apple + /// // banana + /// // cherry + /// IEnumerable&lt;int&gt; numbers = new List&lt;int&gt; { 1, 2, 3 }; + /// numbers.ForEachItem(number =&gt; Console.WriteLine(number * 2)); + /// // Output: + /// // 2 + /// // 4 + /// // 6 + /// + /// + public static void ForEachItem<T>(this IEnumerable<T> collection, Action<T> action) + { + foreach (var item in collection) + action(item); + } +} \ No newline at end of file diff --git a/src/Constants/Application.cs new file mode 100644 index 0000000..1745dc9 --- /dev/null +++ b/src/Constants/Application.cs @@ -0,0 +1,37 @@ +namespace OllamaSharp.Constants; + +/// +/// Contains constant values used throughout the application. +/// +internal static class Application +{ + public const string Ollama = "ollama"; + public const string KeepAlive = "keep_alive"; + public const string Truncate = "truncate"; + public const string LoadDuration = "load_duration"; + public const string TotalDuration = "total_duration"; + public const string PromptEvalDuration = "prompt_eval_duration"; + public const string PromptEvalCount = "prompt_eval_count"; + public const string EvalDuration = "eval_duration"; + public const string EvalCount = "eval_count"; + public const string Context = "context"; + public const string Done = "done"; + public const string Response = "response"; + public const string CreatedAt = "created_at"; + public const string Model = "model"; + + public const string Assistant = "assistant"; + public const string System = "system"; + public const string User = "user"; + public const string Tool = "tool"; + + public const string Length = "length"; + public const string Stop = "stop"; + + public const string Object = "object"; + public const string Function = "function"; + + public const string Json = "json"; + + public const string NotApplicable = "n/a"; +} \ No newline at end of file diff --git a/src/Constants/Endpoints.cs new file mode 100644 index 0000000..eb53580 --- /dev/null +++ b/src/Constants/Endpoints.cs @@ -0,0 +1,26 @@ +namespace OllamaSharp.Constants; + +/// +/// Provides a collection of constant endpoint URLs used by the API in the OllamaSharp library. +/// +/// +///

+/// This static class contains various string constants that represent API endpoints. These constants are used primarily
+/// in API client implementations for making requests to specific functionality provided by the backend API.
+///
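+/// Example (a minimal sketch, not part of this diff; assumes an HttpClient whose BaseAddress points to the Ollama server and a CancellationToken named cancellationToken):
+/// using var request = new HttpRequestMessage(HttpMethod.Post, Endpoints.Chat);
+/// using var response = await httpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken);
+///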
+internal static class Endpoints +{ + public const string CreateModel = "api/create"; + public const string DeleteModel = "api/delete"; + public const string ListLocalModels = "api/tags"; + public const string ListRunningModels = "api/ps"; + public const string ShowModel = "api/show"; + public const string CopyModel = "api/copy"; + public const string PullModel = "api/pull"; + public const string PushModel = "api/push"; + public const string Embed = "api/embed"; + public const string Chat = "api/chat"; + public const string Version = "api/version"; + public const string Generate = "api/generate"; +} \ No newline at end of file diff --git a/src/Constants/MimeTypes.cs b/src/Constants/MimeTypes.cs new file mode 100644 index 0000000..8baca48 --- /dev/null +++ b/src/Constants/MimeTypes.cs @@ -0,0 +1,12 @@ +namespace OllamaSharp.Constants; + +/// +/// Provides predefined MIME type constants to be used across the application. +///

+/// MIME types are used to specify the format of data being sent or received in HTTP requests.
+///
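+/// Example (a minimal sketch, not part of this diff; json stands in for an already-serialized request body):
+/// var content = new StringContent(json, Encoding.UTF8, MimeTypes.Json);
+///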
+internal static class MimeTypes +{ + public const string Json = "application/json"; +} \ No newline at end of file diff --git a/src/HttpRequestMessageExtensions.cs b/src/HttpRequestMessageExtensions.cs index 264721d..2be3462 100644 --- a/src/HttpRequestMessageExtensions.cs +++ b/src/HttpRequestMessageExtensions.cs @@ -1,5 +1,3 @@ -using System.Collections.Generic; -using System.Net.Http; using System.Net.Http.Headers; using OllamaSharp.Models; @@ -18,14 +16,8 @@ internal static class HttpRequestMessageExtensions /// An optional to get additional custom headers from. public static void ApplyCustomHeaders(this HttpRequestMessage requestMessage, Dictionary headers, OllamaRequest? ollamaRequest) { - foreach (var header in headers) - AddOrUpdateHeaderValue(requestMessage.Headers, header.Key, header.Value); - - if (ollamaRequest != null) - { - foreach (var header in ollamaRequest.CustomHeaders) - AddOrUpdateHeaderValue(requestMessage.Headers, header.Key, header.Value); - } + var concatenated = headers.Concat(ollamaRequest?.CustomHeaders ?? []); + concatenated.ForEachItem(header => requestMessage.Headers.AddOrUpdateHeaderValue(header.Key, header.Value)); } /// @@ -34,11 +26,9 @@ public static void ApplyCustomHeaders(this HttpRequestMessage requestMessage, Di /// The collection to update. /// The key of the header to add or update. /// The value of the header to add or update. - private static void AddOrUpdateHeaderValue(HttpRequestHeaders requestMessageHeaders, string headerKey, string headerValue) + private static void AddOrUpdateHeaderValue(this HttpRequestHeaders requestMessageHeaders, string headerKey, string headerValue) { - if (requestMessageHeaders.Contains(headerKey)) - requestMessageHeaders.Remove(headerKey); - + requestMessageHeaders.Remove(headerKey); requestMessageHeaders.Add(headerKey, headerValue); } -} +} \ No newline at end of file diff --git a/src/MicrosoftAi/AbstractionMapper.cs b/src/MicrosoftAi/AbstractionMapper.cs index 476d480..e72a260 100644 --- a/src/MicrosoftAi/AbstractionMapper.cs +++ b/src/MicrosoftAi/AbstractionMapper.cs @@ -1,11 +1,10 @@ -using System; -using System.Collections.Generic; -using System.Linq; using System.Text.Json; using System.Text.Json.Nodes; using Microsoft.Extensions.AI; +using OllamaSharp.Constants; using OllamaSharp.Models; using OllamaSharp.Models.Chat; +using ChatRole = OllamaSharp.Models.Chat.ChatRole; namespace OllamaSharp.MicrosoftAi; @@ -53,7 +52,7 @@ public static ChatRequest ToOllamaSharpChatRequest(IList chatMessag { var request = new ChatRequest { - Format = Equals(options?.ResponseFormat, ChatResponseFormat.Json) ? "json" : null, + Format = Equals(options?.ResponseFormat, ChatResponseFormat.Json) ? Application.Json : null, KeepAlive = null, Messages = ToOllamaSharpMessages(chatMessages, serializerOptions), Model = options?.ModelId ?? 
"", // will be set OllamaApiClient.SelectedModel if not set @@ -167,17 +166,17 @@ private static Tool ToOllamaSharpTool(AIFunctionMetadata functionMetadata) Name = functionMetadata.Name, Parameters = new Parameters { - Properties = functionMetadata.Parameters.ToDictionary(p => p.Name, p => new Models.Chat.Property + Properties = functionMetadata.Parameters.ToDictionary(p => p.Name, p => new Property { Description = p.Description, Enum = GetPossibleValues(p.Schema as JsonObject), Type = ToFunctionTypeString(p.Schema as JsonObject) }), Required = functionMetadata.Parameters.Where(p => p.IsRequired).Select(p => p.Name), - Type = "object" + Type = Application.Object } }, - Type = "function" + Type = Application.Function }; } @@ -238,7 +237,7 @@ private static IEnumerable ToOllamaSharpMessages(IList cha CallId = frc.CallId, Result = jsonResult, }, serializerOptions), - Role = Models.Chat.ChatRole.Tool, + Role = ChatRole.Tool, }; } } @@ -282,15 +281,15 @@ private static Message.ToolCall ToOllamaSharpToolCall(FunctionCallContent functi /// /// The chat role to map. /// A object containing the mapped role. - private static Models.Chat.ChatRole ToOllamaSharpRole(Microsoft.Extensions.AI.ChatRole role) + private static ChatRole ToOllamaSharpRole(Microsoft.Extensions.AI.ChatRole role) { return role.Value switch { - "assistant" => Models.Chat.ChatRole.Assistant, - "system" => Models.Chat.ChatRole.System, - "user" => Models.Chat.ChatRole.User, - "tool" => Models.Chat.ChatRole.Tool, - _ => new Models.Chat.ChatRole(role.Value), + Application.Assistant => ChatRole.Assistant, + Application.System => ChatRole.System, + Application.User => ChatRole.User, + Application.Tool => ChatRole.Tool, + _ => new ChatRole(role.Value), }; } @@ -299,17 +298,17 @@ private static Models.Chat.ChatRole ToOllamaSharpRole(Microsoft.Extensions.AI.Ch ///
/// The chat role to map. /// A object containing the mapped role. - private static Microsoft.Extensions.AI.ChatRole ToAbstractionRole(OllamaSharp.Models.Chat.ChatRole? role) + private static Microsoft.Extensions.AI.ChatRole ToAbstractionRole(ChatRole? role) { if (role is null) return new Microsoft.Extensions.AI.ChatRole("unknown"); return role.ToString() switch { - "assistant" => Microsoft.Extensions.AI.ChatRole.Assistant, - "system" => Microsoft.Extensions.AI.ChatRole.System, - "user" => Microsoft.Extensions.AI.ChatRole.User, - "tool" => Microsoft.Extensions.AI.ChatRole.Tool, + Application.Assistant => Microsoft.Extensions.AI.ChatRole.Assistant, + Application.System => Microsoft.Extensions.AI.ChatRole.System, + Application.User => Microsoft.Extensions.AI.ChatRole.User, + Application.Tool => Microsoft.Extensions.AI.ChatRole.Tool, _ => new Microsoft.Extensions.AI.ChatRole(role.ToString()!), }; } @@ -352,7 +351,7 @@ public static ChatMessage ToChatMessage(Message message) if (toolCall.Function is { } function) { var id = Guid.NewGuid().ToString().Substring(0, 8); - contents.Add(new FunctionCallContent(id, function.Name ?? "n/a", function.Arguments) { RawRepresentation = toolCall }); + contents.Add(new FunctionCallContent(id, function.Name ?? Application.NotApplicable, function.Arguments) { RawRepresentation = toolCall }); } } } @@ -376,10 +375,10 @@ private static AdditionalPropertiesDictionary ParseOllamaChatResponseProps(ChatD return new AdditionalPropertiesDictionary { - ["load_duration"] = TimeSpan.FromMilliseconds(response.LoadDuration / NANOSECONDS_PER_MILLISECOND), - ["total_duration"] = TimeSpan.FromMilliseconds(response.TotalDuration / NANOSECONDS_PER_MILLISECOND), - ["prompt_eval_duration"] = TimeSpan.FromMilliseconds(response.PromptEvalDuration / NANOSECONDS_PER_MILLISECOND), - ["eval_duration"] = TimeSpan.FromMilliseconds(response.EvalDuration / NANOSECONDS_PER_MILLISECOND) + [Application.LoadDuration] = TimeSpan.FromMilliseconds(response.LoadDuration / NANOSECONDS_PER_MILLISECOND), + [Application.TotalDuration] = TimeSpan.FromMilliseconds(response.TotalDuration / NANOSECONDS_PER_MILLISECOND), + [Application.PromptEvalDuration] = TimeSpan.FromMilliseconds(response.PromptEvalDuration / NANOSECONDS_PER_MILLISECOND), + [Application.EvalDuration] = TimeSpan.FromMilliseconds(response.EvalDuration / NANOSECONDS_PER_MILLISECOND) }; } @@ -394,8 +393,8 @@ private static AdditionalPropertiesDictionary ParseOllamaEmbedResponseProps(Embe return new AdditionalPropertiesDictionary { - ["load_duration"] = TimeSpan.FromMilliseconds((response.LoadDuration ?? 0) / NANOSECONDS_PER_MILLISECOND), - ["total_duration"] = TimeSpan.FromMilliseconds((response.TotalDuration ?? 0) / NANOSECONDS_PER_MILLISECOND) + [Application.LoadDuration] = TimeSpan.FromMilliseconds((response.LoadDuration ?? 0) / NANOSECONDS_PER_MILLISECOND), + [Application.TotalDuration] = TimeSpan.FromMilliseconds((response.TotalDuration ?? 
0) / NANOSECONDS_PER_MILLISECOND) }; } @@ -409,8 +408,8 @@ private static AdditionalPropertiesDictionary ParseOllamaEmbedResponseProps(Embe return ollamaDoneReason switch { null => null, - "length" => ChatFinishReason.Length, - "stop" => ChatFinishReason.Stop, + Application.Length => ChatFinishReason.Length, + Application.Stop => ChatFinishReason.Stop, _ => new ChatFinishReason(ollamaDoneReason), }; } @@ -451,10 +450,10 @@ public static EmbedRequest ToOllamaEmbedRequest(IEnumerable values, Embe if (options?.AdditionalProperties is { } requestProps) { - if (requestProps.TryGetValue("keep_alive", out long keepAlive)) + if (requestProps.TryGetValue(Application.KeepAlive, out long keepAlive)) request.KeepAlive = keepAlive; - if (requestProps.TryGetValue("truncate", out bool truncate)) + if (requestProps.TryGetValue(Application.Truncate, out bool truncate)) request.Truncate = truncate; } diff --git a/src/Models/Chat/ChatDoneResponseStream.cs index 30985fd..a910ae6 100644 --- a/src/Models/Chat/ChatDoneResponseStream.cs +++ b/src/Models/Chat/ChatDoneResponseStream.cs @@ -1,4 +1,5 @@ using System.Text.Json.Serialization; +using OllamaSharp.Constants; namespace OllamaSharp.Models.Chat; @@ -10,13 +11,13 @@ public class ChatDoneResponseStream : ChatResponseStream /// /// The time spent generating the response /// - [JsonPropertyName("total_duration")] + [JsonPropertyName(Application.TotalDuration)] public long TotalDuration { get; set; } /// /// The time spent in nanoseconds loading the model /// - [JsonPropertyName("load_duration")] + [JsonPropertyName(Application.LoadDuration)] public long LoadDuration { get; set; } /// @@ -28,7 +29,7 @@ public class ChatDoneResponseStream : ChatResponseStream /// /// The time spent in nanoseconds evaluating the prompt /// - [JsonPropertyName("prompt_eval_duration")] + [JsonPropertyName(Application.PromptEvalDuration)] public long PromptEvalDuration { get; set; } /// @@ -40,7 +41,7 @@ public class ChatDoneResponseStream : ChatResponseStream /// /// The time in nanoseconds spent generating the response /// - [JsonPropertyName("eval_duration")] + [JsonPropertyName(Application.EvalDuration)] public long EvalDuration { get; set; } /// diff --git a/src/Models/Chat/MessageBuilder.cs index 96324bb..b3d6b5f 100644 --- a/src/Models/Chat/MessageBuilder.cs +++ b/src/Models/Chat/MessageBuilder.cs @@ -1,19 +1,43 @@ -using System.Collections.Generic; using System.Text; namespace OllamaSharp.Models.Chat; /// -/// A message builder that can build messages from streamed chunks +/// A builder class for constructing a Message by appending multiple message chunks. /// public class MessageBuilder { private readonly StringBuilder _contentBuilder = new(); + private List<string> _images = []; + private List<Message.ToolCall> _toolCalls = []; /// - /// Appends a message chunk to build the final message + /// Appends a chat response stream chunk to the message under construction. /// - /// The message chunk to append to the final message + /// The ChatResponseStream instance containing a message and additional data to append. If the message is null, no operation is performed. + /// + /// This method processes the provided chunk by appending its message content to the underlying content builder, + /// updates the Role based on the chunk's message role, and adds any related images or tool calls if present.
+ /// + /// + /// Example usage: + /// + /// var builder = new MessageBuilder(); + /// var chunk = new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "Hello, World!", + /// Role = ChatRole.User, + /// Images = new[] { "iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAAGUlEQVR4nGJpfnqXARtgwio6aCUAAQAA///KcwJYgRBQbAAAAABJRU5ErkJggg==" }, + /// ToolCalls = new List<Message.ToolCall>() + /// } + /// }; + /// builder.Append(chunk); + /// var resultMessage = builder.ToMessage(); + /// Console.WriteLine(resultMessage.Content); // Output: Hello, World! + /// + /// public void Append(ChatResponseStream? chunk) { if (chunk?.Message is null) @@ -22,44 +46,169 @@ public void Append(ChatResponseStream? chunk) _contentBuilder.Append(chunk.Message.Content ?? ""); Role = chunk.Message.Role; - if (chunk.Message.Images is not null) - Images.AddRange(chunk.Message.Images); - - if (chunk.Message.ToolCalls is not null) - ToolCalls.AddRange(chunk.Message.ToolCalls); + _images.AddRangeIfNotNull(chunk.Message.Images); + _toolCalls.AddRangeIfNotNull(chunk.Message.ToolCalls); } /// - /// Builds the message out of the streamed chunks that were appended before + /// Converts the current state of the message builder into a object. /// - public Message ToMessage() - { - return new Message - { - Content = _contentBuilder.ToString(), - Images = Images.ToArray(), - Role = Role, - ToolCalls = ToolCalls - }; - } + /// + /// A instance containing the following elements: + ///
+ ///   • Content: The combined content built using appended chunks.
+ ///   • Role: The role assigned to the message.
+ ///   • Images: An array of image strings associated with the message.
+ ///   • ToolCalls: A collection of tool call objects associated with the message.
+ ///
+ /// + /// Example usage: + /// + /// var builder = new MessageBuilder(); + /// builder.Role = ChatRole.Assistant; + /// // Append content (this would typically be done with Append method) + /// builder.Append(new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "Generated content from assistant." + /// } + /// }); + /// // Convert to a Message object + /// var finalMessage = builder.ToMessage(); + /// Console.WriteLine(finalMessage.Content); // Output: Generated content from assistant. + /// Console.WriteLine(finalMessage.Role); // Output: Assistant + /// + /// + public Message ToMessage() => + new() { Content = _contentBuilder.ToString(), Images = _images.ToArray(), Role = Role, ToolCalls = _toolCalls }; + /// - /// The role of the message, either system, user or assistant + /// Represents the role associated with a chat message. Roles are used to determine the purpose or origin of a message, such as system, user, assistant, or tool. /// - public ChatRole? Role { get; set; } + /// + ///

+ /// The property is typically used to indicate the sender's context or role within a conversation.
+ ///   • System: Represents system-generated messages.
+ ///   • User: Represents messages sent by the user.
+ ///   • Assistant: Represents messages generated by an assistant or AI model.
+ ///   • Tool: Represents messages or actions triggered by external tools.
+ ///
    + /// + /// Example usage: + /// + /// var messageBuilder = new MessageBuilder(); + /// var chunk = new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "What can I help you with?", + /// Role = ChatRole.Assistant + /// } + /// }; + /// messageBuilder.Append(chunk); + /// Console.WriteLine(messageBuilder.Role); // Output: Assistant + /// + /// + public ChatRole? Role { get; private set; } /// - /// Base64-encoded images (for multimodal models such as llava) + /// Represents the collection of image references included in a message. /// - public List Images { get; set; } = []; + /// + ///

+ /// This property contains a read-only collection of Base64-encoded images associated with the message content.
+ /// The property is often utilized in scenarios where messages include supplementary visual content,
+ /// such as in chat interfaces, AI-generated responses with images, or tools requiring multimedia integration.

    + ///
    + /// + /// Example usage: + /// + /// var builder = new MessageBuilder(); + /// var chunk = new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "Here is an example image:", + /// Role = ChatRole.Assistant, + /// Images = new[] { "example_image.png" } // Note: Images are base64 encoded, this is just an example + /// } + /// }; + /// builder.Append(chunk); + /// var resultMessage = builder.ToMessage(); + /// Console.WriteLine(string.Join(", ", resultMessage.Images)); // Output: example_image.png + /// + /// + public IReadOnlyCollection Images { get; } /// - /// A list of tools the model wants to use (for models that support function calls, such as llama3.1) + /// Represents the collection of tool calls associated with a chat message. /// - public List ToolCalls { get; set; } = []; + /// + ///

+ /// The property is used to store references to external tools or functions invoked during a chat conversation.
+ /// Tool calls can include various functions or actions that were triggered by the message. For instance:
+ ///   • Fetching data asynchronously from APIs.
+ ///   • Executing background processes.
+ ///   • Triggering integrations with third-party tools.
+ /// This property aggregates and holds all tool calls appended via the builder during message construction.
+ ///
    + /// + /// Example usage: + /// + /// var messageBuilder = new MessageBuilder(); + /// var toolCall = new Message.ToolCall + /// { + /// Function = new Message.Function() + /// }; + /// var chunk = new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "Triggered a tool call", + /// ToolCalls = new[] { toolCall } + /// } + /// }; + /// messageBuilder.Append(chunk); + /// Console.WriteLine(messageBuilder.ToolCalls.Count); // Output: 1 + /// + /// + public IReadOnlyCollection ToolCalls { get; } + /// - /// Gets whether the message builder received message chunks yet + /// Indicates whether the current instance contains any content, images, or tool calls. /// - public bool HasValue => _contentBuilder.Length > 0 || ToolCalls.Count > 0; -} + /// + /// The property evaluates to true if: + /// + ///
+ ///   • The internal content builder has accumulated text.
+ ///   • There are any tool calls added to the ToolCalls collection.
+ ///   • There are images included in the Images collection.
+ /// Otherwise, it returns false.
+ ///

+ /// This property is useful for verifying whether the builder contains meaningful data before processing further,
+ /// such as converting it to a Message or appending additional elements to it.

    + ///
    + /// + /// Example usage: + /// + /// var messageBuilder = new MessageBuilder(); + /// Console.WriteLine(messageBuilder.HasValue); // Output: False + /// // Append a new chunk of content + /// messageBuilder.Append(new ChatResponseStream + /// { + /// Message = new Message + /// { + /// Content = "Hello, how can I assist you?", + /// Role = ChatRole.Assistant + /// } + /// }); + /// Console.WriteLine(messageBuilder.HasValue); // Output: True + /// + /// + public bool HasValue => _contentBuilder.Length > 0 || _toolCalls.Any() || _images.Any(); +} \ No newline at end of file diff --git a/src/Models/Embed.cs b/src/Models/Embed.cs index 2d58ca7..5200ee0 100644 --- a/src/Models/Embed.cs +++ b/src/Models/Embed.cs @@ -1,5 +1,5 @@ -using System.Collections.Generic; using System.Text.Json.Serialization; +using OllamaSharp.Constants; namespace OllamaSharp.Models; @@ -60,13 +60,13 @@ public class EmbedResponse /// /// The time spent generating the response /// - [JsonPropertyName("total_duration")] + [JsonPropertyName(Application.TotalDuration)] public long? TotalDuration { get; set; } /// /// The time spent in nanoseconds loading the model /// - [JsonPropertyName("load_duration")] + [JsonPropertyName(Application.LoadDuration)] public long? LoadDuration { get; set; } /// diff --git a/src/Models/Generate.cs b/src/Models/Generate.cs index 368ee94..8231351 100644 --- a/src/Models/Generate.cs +++ b/src/Models/Generate.cs @@ -1,5 +1,7 @@ using System; +using System.Net.Mime; using System.Text.Json.Serialization; +using OllamaSharp.Constants; namespace OllamaSharp.Models; @@ -108,13 +110,13 @@ public class GenerateResponseStream /// /// The model that generated the response /// - [JsonPropertyName("model")] + [JsonPropertyName(Application.Model)] public string Model { get; set; } = null!; /// /// Gets or sets the time the response was generated. /// - [JsonPropertyName("created_at")] + [JsonPropertyName(Application.CreatedAt)] public string? CreatedAtString { get => _createdAtString; @@ -142,13 +144,13 @@ public DateTimeOffset? 
CreatedAt /// /// The response generated by the model /// - [JsonPropertyName("response")] + [JsonPropertyName(Application.Response)] public string Response { get; set; } = null!; /// /// Whether the response is complete /// - [JsonPropertyName("done")] + [JsonPropertyName(Application.Done)] public bool Done { get; set; } } @@ -161,42 +163,42 @@ public class GenerateDoneResponseStream : GenerateResponseStream /// An encoding of the conversation used in this response, this can be /// sent in the next request to keep a conversational memory /// - [JsonPropertyName("context")] + [JsonPropertyName(Application.Context)] public long[] Context { get; set; } = null!; /// /// The time spent generating the response /// - [JsonPropertyName("total_duration")] + [JsonPropertyName(Application.TotalDuration)] public long TotalDuration { get; set; } /// /// The time spent in nanoseconds loading the model /// - [JsonPropertyName("load_duration")] + [JsonPropertyName(Application.LoadDuration)] public long LoadDuration { get; set; } /// /// The number of tokens in the prompt /// - [JsonPropertyName("prompt_eval_count")] + [JsonPropertyName(Application.PromptEvalCount)] public int PromptEvalCount { get; set; } /// /// The time spent in nanoseconds evaluating the prompt /// - [JsonPropertyName("prompt_eval_duration")] + [JsonPropertyName(Application.PromptEvalDuration)] public long PromptEvalDuration { get; set; } /// /// The number of tokens in the response /// - [JsonPropertyName("eval_count")] + [JsonPropertyName(Application.EvalCount)] public int EvalCount { get; set; } /// /// The time in nanoseconds spent generating the response /// - [JsonPropertyName("eval_duration")] + [JsonPropertyName(Application.EvalDuration)] public long EvalDuration { get; set; } } \ No newline at end of file diff --git a/src/OllamaApiClient.cs b/src/OllamaApiClient.cs index c4fe17c..ca20966 100644 --- a/src/OllamaApiClient.cs +++ b/src/OllamaApiClient.cs @@ -1,16 +1,12 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net.Http; +using System.Net; using System.Runtime.CompilerServices; using System.Text; using System.Text.Encodings.Web; using System.Text.Json; using System.Text.Json.Nodes; -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.AI; +using OllamaSharp.Constants; +using OllamaSharp.MicrosoftAi; using OllamaSharp.Models; using OllamaSharp.Models.Chat; using OllamaSharp.Models.Exceptions; @@ -108,17 +104,15 @@ public OllamaApiClient(HttpClient client, string defaultModel = "") /// public async IAsyncEnumerable CreateModelAsync(CreateModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - await foreach (var result in StreamPostAsync("api/create", request, cancellationToken).ConfigureAwait(false)) + await foreach (var result in StreamPostAsync(Endpoints.CreateModel, request, cancellationToken).ConfigureAwait(false)) yield return result; } /// public async Task DeleteModelAsync(DeleteModelRequest request, CancellationToken cancellationToken = default) { - using var requestMessage = new HttpRequestMessage(HttpMethod.Delete, "api/delete") - { - Content = new StringContent(JsonSerializer.Serialize(request, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; + using var requestMessage = new HttpRequestMessage(HttpMethod.Delete, Endpoints.DeleteModel); + requestMessage.Content = new StringContent(JsonSerializer.Serialize(request, OutgoingJsonSerializerOptions), Encoding.UTF8, 
MimeTypes.Json); await SendToOllamaAsync(requestMessage, request, HttpCompletionOption.ResponseContentRead, cancellationToken).ConfigureAwait(false); } @@ -126,36 +120,36 @@ public async Task DeleteModelAsync(DeleteModelRequest request, CancellationToken /// public async Task> ListLocalModelsAsync(CancellationToken cancellationToken = default) { - var data = await GetAsync("api/tags", cancellationToken).ConfigureAwait(false); + var data = await GetAsync(Endpoints.ListLocalModels, cancellationToken).ConfigureAwait(false); return data.Models; } /// public async Task> ListRunningModelsAsync(CancellationToken cancellationToken = default) { - var data = await GetAsync("api/ps", cancellationToken).ConfigureAwait(false); + var data = await GetAsync(Endpoints.ListRunningModels, cancellationToken).ConfigureAwait(false); return data.RunningModels; } /// public Task ShowModelAsync(ShowModelRequest request, CancellationToken cancellationToken = default) - => PostAsync("api/show", request, cancellationToken); + => PostAsync(Endpoints.ShowModel, request, cancellationToken); /// public Task CopyModelAsync(CopyModelRequest request, CancellationToken cancellationToken = default) - => PostAsync("api/copy", request, cancellationToken); + => PostAsync(Endpoints.CopyModel, request, cancellationToken); /// public async IAsyncEnumerable PullModelAsync(PullModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - await foreach (var result in StreamPostAsync("api/pull", request, cancellationToken).ConfigureAwait(false)) + await foreach (var result in StreamPostAsync(Endpoints.PullModel, request, cancellationToken).ConfigureAwait(false)) yield return result; } /// public async IAsyncEnumerable PushModelAsync(PushModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - var stream = StreamPostAsync("api/push", request, cancellationToken).ConfigureAwait(false); + var stream = StreamPostAsync(Endpoints.PushModel, request, cancellationToken).ConfigureAwait(false); await foreach (var result in stream.ConfigureAwait(false)) yield return result; @@ -167,7 +161,7 @@ public Task EmbedAsync(EmbedRequest request, CancellationToken ca if (string.IsNullOrEmpty(request.Model)) request.Model = SelectedModel; - return PostAsync("api/embed", request, cancellationToken); + return PostAsync(Endpoints.Embed, request, cancellationToken); } /// @@ -192,10 +186,8 @@ public Task EmbedAsync(EmbedRequest request, CancellationToken ca See Ollama docs at https://github.com/ollama/ollama/blob/main/docs/api.md#parameters-1 to see whether support has since been added. """); - using var requestMessage = new HttpRequestMessage(HttpMethod.Post, "api/chat") - { - Content = new StringContent(JsonSerializer.Serialize(request, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; + using var requestMessage = new HttpRequestMessage(HttpMethod.Post, Endpoints.Chat); + requestMessage.Content = new StringContent(JsonSerializer.Serialize(request, OutgoingJsonSerializerOptions), Encoding.UTF8, MimeTypes.Json); var completion = request.Stream ? 
HttpCompletionOption.ResponseHeadersRead @@ -210,7 +202,7 @@ public Task EmbedAsync(EmbedRequest request, CancellationToken ca /// public async Task IsRunningAsync(CancellationToken cancellationToken = default) { - using var requestMessage = new HttpRequestMessage(HttpMethod.Get, ""); // without route returns "Ollama is running" + using var requestMessage = new HttpRequestMessage(HttpMethod.Get, string.Empty); // without route returns "Ollama is running" using var response = await SendToOllamaAsync(requestMessage, null, HttpCompletionOption.ResponseContentRead, cancellationToken).ConfigureAwait(false); @@ -222,17 +214,14 @@ public async Task IsRunningAsync(CancellationToken cancellationToken = def /// public async Task GetVersionAsync(CancellationToken cancellationToken = default) { - var data = await GetAsync("api/version", cancellationToken).ConfigureAwait(false); + var data = await GetAsync(Endpoints.Version, cancellationToken).ConfigureAwait(false); var versionString = data["version"]?.ToString() ?? throw new InvalidOperationException("Could not get version from response."); return Version.Parse(versionString); } private async IAsyncEnumerable GenerateCompletionAsync(GenerateRequest generateRequest, [EnumeratorCancellation] CancellationToken cancellationToken) { - using var requestMessage = new HttpRequestMessage(HttpMethod.Post, "api/generate") - { - Content = new StringContent(JsonSerializer.Serialize(generateRequest, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; + using var requestMessage = CreateRequestMessage(HttpMethod.Post, Endpoints.Generate, generateRequest); var completion = generateRequest.Stream ? HttpCompletionOption.ResponseHeadersRead @@ -246,32 +235,28 @@ public async Task GetVersionAsync(CancellationToken cancellationToken = private async Task GetAsync(string endpoint, CancellationToken cancellationToken) { - using var requestMessage = new HttpRequestMessage(HttpMethod.Get, endpoint); - + using var requestMessage = CreateRequestMessage(HttpMethod.Get, endpoint); + using var response = await SendToOllamaAsync(requestMessage, null, HttpCompletionOption.ResponseContentRead, cancellationToken).ConfigureAwait(false); - + using var responseStream = await response.Content.ReadAsStreamAsync().ConfigureAwait(false); return (await JsonSerializer.DeserializeAsync(responseStream, IncomingJsonSerializerOptions, cancellationToken))!; } + + private async Task PostAsync(string endpoint, TRequest ollamaRequest, CancellationToken cancellationToken) where TRequest : OllamaRequest { - using var requestMessage = new HttpRequestMessage(HttpMethod.Post, endpoint) - { - Content = new StringContent(JsonSerializer.Serialize(ollamaRequest, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; + using var requestMessage = CreateRequestMessage(HttpMethod.Post, endpoint, ollamaRequest); await SendToOllamaAsync(requestMessage, ollamaRequest, HttpCompletionOption.ResponseContentRead, cancellationToken).ConfigureAwait(false); } private async Task PostAsync(string endpoint, TRequest ollamaRequest, CancellationToken cancellationToken) where TRequest : OllamaRequest { - using var requestMessage = new HttpRequestMessage(HttpMethod.Post, endpoint) - { - Content = new StringContent(JsonSerializer.Serialize(ollamaRequest, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; - + using var requestMessage = CreateRequestMessage(HttpMethod.Post, endpoint, ollamaRequest); + using var response = await SendToOllamaAsync(requestMessage, ollamaRequest, 
HttpCompletionOption.ResponseContentRead, cancellationToken).ConfigureAwait(false); using var responseStream = await response.Content.ReadAsStreamAsync().ConfigureAwait(false); @@ -281,16 +266,26 @@ private async Task PostAsync(string endpoint, TR private async IAsyncEnumerable StreamPostAsync(string endpoint, TRequest ollamaRequest, [EnumeratorCancellation] CancellationToken cancellationToken) where TRequest : OllamaRequest { - using var requestMessage = new HttpRequestMessage(HttpMethod.Post, endpoint) - { - Content = new StringContent(JsonSerializer.Serialize(ollamaRequest, OutgoingJsonSerializerOptions), Encoding.UTF8, "application/json") - }; + using var requestMessage = CreateRequestMessage(HttpMethod.Post, endpoint, ollamaRequest); using var response = await SendToOllamaAsync(requestMessage, ollamaRequest, HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); await foreach (var result in ProcessStreamedResponseAsync(response, cancellationToken).ConfigureAwait(false)) yield return result; } + + + private HttpRequestMessage CreateRequestMessage(HttpMethod method, string endpoint) => new(method, endpoint); + + private HttpRequestMessage CreateRequestMessage(HttpMethod method, string endpoint, TRequest ollamaRequest) where TRequest : OllamaRequest + { + var requestMessage = new HttpRequestMessage(method, endpoint); + requestMessage.Content = GetJsonContent(ollamaRequest); + return requestMessage; + } + + private StringContent GetJsonContent(TRequest ollamaRequest) where TRequest : OllamaRequest => + new(JsonSerializer.Serialize(ollamaRequest, OutgoingJsonSerializerOptions), Encoding.UTF8, MimeTypes.Json); private async IAsyncEnumerable ProcessStreamedResponseAsync(HttpResponseMessage response, [EnumeratorCancellation] CancellationToken cancellationToken) { @@ -356,7 +351,7 @@ protected virtual async Task SendToOllamaAsync(HttpRequestM private async Task EnsureSuccessStatusCodeAsync(HttpResponseMessage response) { - if (response.StatusCode == System.Net.HttpStatusCode.BadRequest) + if (response.StatusCode == HttpStatusCode.BadRequest) { var body = await response.Content.ReadAsStringAsync().ConfigureAwait(false) ?? string.Empty; @@ -387,33 +382,33 @@ private async Task EnsureSuccessStatusCodeAsync(HttpResponseMessage response) #region IChatClient and IEmbeddingGenerator implementation /// - ChatClientMetadata IChatClient.Metadata => new("ollama", Uri, SelectedModel); + ChatClientMetadata IChatClient.Metadata => new(Application.Ollama, Uri, SelectedModel); /// - EmbeddingGeneratorMetadata IEmbeddingGenerator>.Metadata => new("ollama", Uri, SelectedModel); + EmbeddingGeneratorMetadata IEmbeddingGenerator>.Metadata => new(Application.Ollama, Uri, SelectedModel); /// async Task IChatClient.CompleteAsync(IList chatMessages, ChatOptions? options, CancellationToken cancellationToken) { - var request = MicrosoftAi.AbstractionMapper.ToOllamaSharpChatRequest(chatMessages, options, stream: false, OutgoingJsonSerializerOptions); + var request = AbstractionMapper.ToOllamaSharpChatRequest(chatMessages, options, stream: false, OutgoingJsonSerializerOptions); var response = await ChatAsync(request, cancellationToken).StreamToEndAsync().ConfigureAwait(false); - return MicrosoftAi.AbstractionMapper.ToChatCompletion(response, response?.Model ?? request.Model ?? SelectedModel) ?? new ChatCompletion([]); + return AbstractionMapper.ToChatCompletion(response, response?.Model ?? request.Model ?? SelectedModel) ?? 
new ChatCompletion([]); } /// async IAsyncEnumerable IChatClient.CompleteStreamingAsync(IList chatMessages, ChatOptions? options, [EnumeratorCancellation] CancellationToken cancellationToken) { - var request = MicrosoftAi.AbstractionMapper.ToOllamaSharpChatRequest(chatMessages, options, stream: true, OutgoingJsonSerializerOptions); + var request = AbstractionMapper.ToOllamaSharpChatRequest(chatMessages, options, stream: true, OutgoingJsonSerializerOptions); await foreach (var response in ChatAsync(request, cancellationToken).ConfigureAwait(false)) - yield return MicrosoftAi.AbstractionMapper.ToStreamingChatCompletionUpdate(response); + yield return AbstractionMapper.ToStreamingChatCompletionUpdate(response); } /// async Task>> IEmbeddingGenerator>.GenerateAsync(IEnumerable values, EmbeddingGenerationOptions? options, CancellationToken cancellationToken) { - var request = MicrosoftAi.AbstractionMapper.ToOllamaEmbedRequest(values, options); + var request = AbstractionMapper.ToOllamaEmbedRequest(values, options); var result = await EmbedAsync(request, cancellationToken).ConfigureAwait(false); - return MicrosoftAi.AbstractionMapper.ToGeneratedEmbeddings(request, result, request.Model ?? SelectedModel); + return AbstractionMapper.ToGeneratedEmbeddings(request, result, request.Model ?? SelectedModel); } /// @@ -433,7 +428,7 @@ public void Dispose() GC.SuppressFinalize(this); if (_disposeHttpClient) - _client?.Dispose(); + _client.Dispose(); } #endregion diff --git a/src/OllamaApiClientExtensions.cs b/src/OllamaApiClientExtensions.cs index 1e9e180..1a99dcb 100644 --- a/src/OllamaApiClientExtensions.cs +++ b/src/OllamaApiClientExtensions.cs @@ -1,6 +1,3 @@ -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; using OllamaSharp.Models; namespace OllamaSharp; diff --git a/test/AbstractionMapperTests.cs b/test/AbstractionMapperTests.cs index 045bd96..7a4fa6d 100644 --- a/test/AbstractionMapperTests.cs +++ b/test/AbstractionMapperTests.cs @@ -3,6 +3,7 @@ using Microsoft.Extensions.AI; using NUnit.Framework; using OllamaSharp; +using OllamaSharp.Constants; using OllamaSharp.MicrosoftAi; using OllamaSharp.Models; using OllamaSharp.Models.Chat; @@ -562,10 +563,10 @@ public void Maps_Known_Properties() var chatCompletion = AbstractionMapper.ToChatCompletion(stream, usedModel: null); chatCompletion.AdditionalProperties.Should().NotBeNull(); - chatCompletion.AdditionalProperties["eval_duration"].Should().Be(TimeSpan.FromSeconds(2.222222222)); - chatCompletion.AdditionalProperties["load_duration"].Should().Be(TimeSpan.FromSeconds(3.333333333)); - chatCompletion.AdditionalProperties["total_duration"].Should().Be(TimeSpan.FromSeconds(6.666666666)); - chatCompletion.AdditionalProperties["prompt_eval_duration"].Should().Be(TimeSpan.FromSeconds(5.555555555)); + chatCompletion.AdditionalProperties[Application.EvalDuration].Should().Be(TimeSpan.FromSeconds(2.222222222)); + chatCompletion.AdditionalProperties[Application.LoadDuration].Should().Be(TimeSpan.FromSeconds(3.333333333)); + chatCompletion.AdditionalProperties[Application.TotalDuration].Should().Be(TimeSpan.FromSeconds(6.666666666)); + chatCompletion.AdditionalProperties[Application.PromptEvalDuration].Should().Be(TimeSpan.FromSeconds(5.555555555)); chatCompletion.Choices.Should().HaveCount(1); chatCompletion.Choices.Single().Text.Should().Be("Hi."); chatCompletion.CompletionId.Should().Be(ollamaCreatedStamp);