diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2c68e17..d95545b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,9 +7,9 @@ on:
pull_request:
branches: [ "main", "master" ]
paths: [ 'src/**' ]
-
-jobs:
-
+
+
+jobs:
build:
runs-on: ubuntu-latest
@@ -45,7 +45,7 @@ jobs:
run: dotnet build --no-restore --configuration=Release /p:Version=${{steps.gitversion.outputs.semVer}}
- name: Test
- run: dotnet test --no-build --configuration=Release --verbosity normal
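+ # Functional tests need a live Ollama server, so they are excluded from CI runs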
+ run: dotnet test --no-build --configuration=Release --verbosity normal --filter 'FullyQualifiedName!~FunctionalTests'
- name: pack nuget packages
run: dotnet pack --output nupkgs --configuration=Release --no-restore --no-build /p:PackageVersion=${{steps.gitversion.outputs.semVer}}
@@ -59,4 +59,4 @@ jobs:
- name: upload nuget package
if: github.event_name != 'pull_request'
- run: dotnet nuget push nupkgs/OllamaSharp*.nupkg -k ${{ secrets.NUGET_API_KEY }} -s https://api.nuget.org/v3/index.json
+ run: dotnet nuget push nupkgs/OllamaSharp*.nupkg -k ${{ secrets.NUGET_API_KEY }} -s https://api.nuget.org/v3/index.json
\ No newline at end of file
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..74de1ce
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,40 @@
+name: docfx Build and Deploy
+
+on:
+ push:
+ branches: [ "main" ]
+ workflow_dispatch:
+
+permissions:
+ actions: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: "pages"
+ cancel-in-progress: false
+
+jobs:
+ publish-docs:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Dotnet Setup
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 8.x
+
+ - run: dotnet tool update -g docfx
+ - run: docfx ./docfx.json
+
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: './_site'
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index f08db5b..240577a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -396,4 +396,8 @@ FodyWeavers.xsd
# JetBrains Rider
*.sln.iml
-/.idea
\ No newline at end of file
+/.idea
+
+# DocFX
+_site/
+api/
\ No newline at end of file
diff --git a/OllamaSharp.FunctionalTests/ChatTests.cs b/OllamaSharp.FunctionalTests/ChatTests.cs
new file mode 100644
index 0000000..585d8f5
--- /dev/null
+++ b/OllamaSharp.FunctionalTests/ChatTests.cs
@@ -0,0 +1,55 @@
+using FluentAssertions;
+using Microsoft.Extensions.AI;
+
+namespace OllamaSharp.FunctionalTests;
+
+public class ChatTests
+{
+ private readonly Uri _baseUri = new("http://localhost:11434");
+ private readonly string _model = "llama3.2:1b";
+
+#pragma warning disable NUnit1032
+ private OllamaApiClient _client = null!;
+ private Chat _chat = null!;
+#pragma warning restore NUnit1032
+
+ [SetUp]
+ public async Task Setup()
+ {
+ // Set up the test environment
+ _client = new OllamaApiClient(_baseUri);
+ _chat = new Chat(_client);
+
+ await _client.PullIfNotExistsAsync(_model);
+ }
+
+ [TearDown]
+ public Task Teardown()
+ {
+ // Clean up the test environment
+ ((IChatClient?)_client)?.Dispose();
+
+ return Task.CompletedTask;
+ }
+
+
+ [Test]
+ public async Task SendAsync_ShouldSucceed()
+ {
+ // Arrange
+ _client.SelectedModel = _model;
+
+ // Act
+ var response = await _chat
+ .SendAsync("What is the ultimate answer to " +
+ "life, the universe, and everything, as specified in " +
+ "a Hitchhikers Guide to the Galaxy. " +
+ "Provide only the answer.",
+ CancellationToken.None)
+ .StreamToEndAsync();
+
+ // Assert
+ response.Should().NotBeNullOrEmpty();
+ response.Should().ContainAny("42", "forty-two", "forty two");
+ }
+}
\ No newline at end of file
diff --git a/OllamaSharp.FunctionalTests/Helpers.cs b/OllamaSharp.FunctionalTests/Helpers.cs
new file mode 100644
index 0000000..7d52ce8
--- /dev/null
+++ b/OllamaSharp.FunctionalTests/Helpers.cs
@@ -0,0 +1,18 @@
+using OllamaSharp.Models;
+
+namespace OllamaSharp.FunctionalTests;
+
+public static class Helpers
+{
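+ /// <summary>
+ /// Pulls the given model if it is not yet available on the connected Ollama instance.
+ /// </summary>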
+ public static async Task PullIfNotExistsAsync(
+ this IOllamaApiClient client,
+ string model)
+ {
+ var modelExists = (await client.ListLocalModelsAsync())
+ .Any(m => m.Name == model);
+
+ if (!modelExists)
+ await client.PullModelAsync(new PullModelRequest { Model = model })
+ .ToListAsync();
+ }
+}
\ No newline at end of file
diff --git a/OllamaSharp.FunctionalTests/OllamaApiClientTests.cs b/OllamaSharp.FunctionalTests/OllamaApiClientTests.cs
new file mode 100644
index 0000000..99c2f04
--- /dev/null
+++ b/OllamaSharp.FunctionalTests/OllamaApiClientTests.cs
@@ -0,0 +1,304 @@
+using FluentAssertions;
+using Microsoft.Extensions.AI;
+using OllamaSharp.Models;
+using OllamaSharp.Models.Chat;
+using ChatRole = OllamaSharp.Models.Chat.ChatRole;
+
+namespace OllamaSharp.FunctionalTests;
+
+public class OllamaApiClientTests
+{
+ private readonly Uri _baseUri = new("http://localhost:11434");
+ private readonly string _model = "llama3.2:1b";
+ private readonly string _localModel = "OllamaSharpTest";
+ private readonly string _embeddingModel = "all-minilm:22m";
+
+#pragma warning disable NUnit1032
+ private OllamaApiClient _client = null!;
+#pragma warning restore NUnit1032
+
+ [SetUp]
+ public async Task Setup()
+ {
+ // Set up the test environment
+ _client = new OllamaApiClient(_baseUri);
+
+ await CleanupModel(_localModel);
+ }
+
+ [TearDown]
+ public async Task Teardown()
+ {
+ // Clean up the test environment
+ await CleanupModel(_localModel);
+
+ ((IChatClient?)_client)?.Dispose();
+ }
+
+ private async Task CleanupModel(string? model = null)
+ {
+ var modelExists = (await _client.ListLocalModelsAsync())
+ .Any(m => m.Name == (model ?? _model));
+
+ if (modelExists)
+ await _client.DeleteModelAsync(new DeleteModelRequest { Model = model ?? _model });
+ }
+
+ private async Task PullIfNotExists(string model)
+ {
+ var modelExists = (await _client.ListLocalModelsAsync())
+ .Any(m => m.Name == model);
+
+ if (!modelExists)
+ await _client.PullModelAsync(new PullModelRequest { Model = model })
+ .ToListAsync();
+ }
+
+
+ [Test, Order(1), Ignore("Prevent the model from being downloaded each test run")]
+ public async Task PullModel()
+ {
+ // Act
+ var response = await _client
+ .PullModelAsync(new PullModelRequest { Model = _model })
+ .ToListAsync();
+
+ // Assert
+ var models = await _client.ListLocalModelsAsync();
+ models.Should().Contain(m => m.Name == _model);
+
+ response.Should().NotBeEmpty();
+ response.Should().Contain(r => r!.Status == "pulling manifest");
+ response.Should().Contain(r => r!.Status == "success");
+ }
+
+ [Test, Order(2)]
+ public async Task CreateModel()
+ {
+ // Arrange
+ await PullIfNotExists(_localModel);
+
+ var model = new CreateModelRequest
+ {
+ Model = _localModel,
+ ModelFileContent =
+ """
+ FROM llama3.2
+ PARAMETER temperature 0.3
+ PARAMETER num_ctx 100
+
+ # sets a custom system message to specify the behavior of the chat assistant
+ SYSTEM You are a concise model that tries to return yes or no answers.
+ """
+ };
+
+ // Act
+ var response = await _client
+ .CreateModelAsync(model)
+ .ToListAsync();
+
+ // Assert
+ var models = await _client.ListLocalModelsAsync();
+ models.Should().Contain(m => m.Name.StartsWith(_localModel));
+
+ response.Should().NotBeEmpty();
+ response.Should().Contain(r => r!.Status == "success");
+ }
+
+ [Test, Order(3)]
+ public async Task CopyModel()
+ {
+ // Arrange
+ await PullIfNotExists(_localModel);
+
+ var model = new CopyModelRequest { Source = _localModel, Destination = $"{_localModel}-copy" };
+
+ // Act
+ await _client.CopyModelAsync(model);
+
+ // Assert
+ var models = await _client.ListLocalModelsAsync();
+ models.Should().Contain(m => m.Name == $"{_localModel}-copy:latest");
+
+ // Clean up
+ await _client.DeleteModelAsync(new DeleteModelRequest { Model = $"{_localModel}-copy:latest" });
+ }
+
+ [Test]
+ public async Task Embed()
+ {
+ // Arrange
+ await PullIfNotExists(_embeddingModel);
+
+ var request = new EmbedRequest { Model = _embeddingModel, Input = ["Hello, world!"] };
+
+ // Act
+ var response = await _client.EmbedAsync(request);
+
+ // Assert
+ response.Should().NotBeNull();
+ response.Embeddings.Should().NotBeEmpty();
+ response.LoadDuration.Should().BeGreaterThan(100, "Because loading the model should take some time");
+ response.TotalDuration.Should().BeGreaterThan(100, "Because generating embeddings should take some time");
+ }
+
+ [Test]
+ public async Task ListLocalModels()
+ {
+ // Act
+ var models = (await _client.ListLocalModelsAsync()).ToList();
+
+ // Assert
+ models.Should().NotBeEmpty();
+ models.Should().Contain(m => m.Name == _model);
+ }
+
+ [Test]
+ public async Task ListRunningModels()
+ {
+ // Arrange
+ await PullIfNotExists(_model);
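+ // Generate in the background so that a model is loaded and listed as
+ // running while /api/ps is queried below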
+ var backgroundTask = Task.Run(async () =>
+ {
+ var generate = _client
+ .GenerateAsync(new GenerateRequest { Model = _model, Prompt = "Write a long song." })
+ .ToListAsync();
+
+ await Task.Yield();
+
+ await generate;
+ });
+
+ // Act
+ var modelsTask = _client.ListRunningModelsAsync();
+
+ await Task.WhenAll(backgroundTask, modelsTask);
+
+ // Assert
+ var models = modelsTask.Result.ToList();
+
+ models.Should().NotBeEmpty();
+ models.Should().Contain(m => m.Name == _model);
+ }
+
+ [Test]
+ public async Task ShowModel()
+ {
+ // Arrange
+ await PullIfNotExists(_model);
+
+ // Act
+ var response = await _client.ShowModelAsync(new ShowModelRequest { Model = _model });
+
+ // Assert
+ response.Should().NotBeNull();
+ response.Info.Should().NotBeNull();
+ response.Info.Architecture.Should().Be("llama");
+ response.Details.Should().NotBeNull();
+ response.Details.Format.Should().NotBeNullOrEmpty();
+ response.Details.Family.Should().Be("llama");
+ }
+
+ [Test]
+ public async Task DeleteModel()
+ {
+ // Arrange
+ await PullIfNotExists(_localModel);
+ await _client.CopyModelAsync(new CopyModelRequest
+ {
+ Source = _localModel, Destination = $"{_localModel}-copy"
+ });
+
+ var exists = (await _client.ListLocalModelsAsync())
+ .Any(m => m.Name == $"{_localModel}-copy:latest");
+
+ exists.Should().BeTrue();
+
+ // Act
+ await _client.DeleteModelAsync(new DeleteModelRequest { Model = $"{_localModel}-copy:latest" });
+
+ // Assert
+ var models = await _client.ListLocalModelsAsync();
+ models.Should().NotContain(m => m.Name == $"{_localModel}-copy:latest");
+ }
+
+ [Test]
+ public async Task GenerateAsync()
+ {
+ // Arrange
+ await PullIfNotExists(_model);
+
+ // Act
+ var response = await _client.GenerateAsync(new GenerateRequest
+ {
+ Model = _model,
+ Prompt =
+ "What is the meaning to life, the universe, and everything according to the Hitchhikers Guide to the Galaxy?"
+ })
+ .ToListAsync();
+
+ var joined = string.Join("", response.Select(r => r.Response));
+
+ // Assert
+ response.Should().NotBeEmpty();
+ joined.Should().Contain("42");
+ }
+
+ [Test]
+ public async Task ChatAsync()
+ {
+ // Arrange
+ await PullIfNotExists(_model);
+
+ // Act
+ var response = await _client.ChatAsync(new ChatRequest
+ {
+ Model = _model,
+ Messages = new[]
+ {
+ new Message
+ {
+ Role = ChatRole.User,
+ Content = "What is the meaning to life, the universe, and everything according to the Hitchhikers Guide to the Galaxy?"
+ },
+ new Message
+ {
+ Role = ChatRole.System,
+ Content = "According to the Hitchhikers Guide to the Galaxy, the meaning to life, the universe, and everything is 42."
+ },
+ new Message
+ {
+ Role = ChatRole.User,
+ Content = "Who is the author of the Hitchhikers Guide to the Galaxy?"
+ }
+ }
+ })
+ .ToListAsync();
+
+ var joined = string.Join("", response.Select(r => r.Message.Content));
+
+ // Assert
+ response.Should().NotBeEmpty();
+ joined.Should().Contain("Douglas Adams");
+ }
+
+ [Test]
+ public async Task IsRunningAsync()
+ {
+ // Act
+ var response = await _client.IsRunningAsync();
+
+ // Assert
+ response.Should().BeTrue();
+ }
+
+ [Test]
+ public async Task GetVersionAsync()
+ {
+ // Act
+ var response = await _client.GetVersionAsync();
+
+ // Assert
+ response.Should().NotBeNull();
+ }
+}
\ No newline at end of file
diff --git a/OllamaSharp.FunctionalTests/OllamaSharp.FunctionalTests.csproj b/OllamaSharp.FunctionalTests/OllamaSharp.FunctionalTests.csproj
new file mode 100644
index 0000000..37fd8bc
--- /dev/null
+++ b/OllamaSharp.FunctionalTests/OllamaSharp.FunctionalTests.csproj
@@ -0,0 +1,30 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>net8.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+
+    <IsPackable>false</IsPackable>
+    <IsTestProject>true</IsTestProject>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <PackageReference Include="FluentAssertions" />
+    <PackageReference Include="Microsoft.NET.Test.Sdk" />
+    <PackageReference Include="NUnit" />
+    <PackageReference Include="NUnit.Analyzers" />
+    <PackageReference Include="NUnit3TestAdapter" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\src\OllamaSharp.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <Using Include="NUnit.Framework" />
+  </ItemGroup>
+
+</Project>
diff --git a/OllamaSharp.sln b/OllamaSharp.sln
index 52976f6..b2aed57 100644
--- a/OllamaSharp.sln
+++ b/OllamaSharp.sln
@@ -14,6 +14,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OllamaApiConsole", "demo\OllamaApiConsole.csproj", "{755670DB-33A4-441A-99C2-642A04D08953}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "OllamaSharp.FunctionalTests", "OllamaSharp.FunctionalTests\OllamaSharp.FunctionalTests.csproj", "{2A32FE50-1EDF-4E93-B6D8-6651975F6ACB}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -32,6 +34,10 @@ Global
{755670DB-33A4-441A-99C2-642A04D08953}.Debug|Any CPU.Build.0 = Debug|Any CPU
{755670DB-33A4-441A-99C2-642A04D08953}.Release|Any CPU.ActiveCfg = Release|Any CPU
{755670DB-33A4-441A-99C2-642A04D08953}.Release|Any CPU.Build.0 = Release|Any CPU
+ {2A32FE50-1EDF-4E93-B6D8-6651975F6ACB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {2A32FE50-1EDF-4E93-B6D8-6651975F6ACB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {2A32FE50-1EDF-4E93-B6D8-6651975F6ACB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {2A32FE50-1EDF-4E93-B6D8-6651975F6ACB}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/demo/Demos/ToolConsole.cs b/demo/Demos/ToolConsole.cs
index e154626..9574da9 100644
--- a/demo/Demos/ToolConsole.cs
+++ b/demo/Demos/ToolConsole.cs
@@ -93,7 +93,7 @@ public override async Task Run()
}
}
- private static IEnumerable<Tool> GetTools() => [new WeatherTool(), new NewsTool()];
+ private static Tool[] GetTools() => [new WeatherTool(), new NewsTool()];
private sealed class WeatherTool : Tool
{
diff --git a/doc-template/public/main.css b/doc-template/public/main.css
new file mode 100644
index 0000000..98b5285
--- /dev/null
+++ b/doc-template/public/main.css
@@ -0,0 +1,9 @@
+.flex-row {
+ display: flex;
+ flex-direction: row;
+}
+
+.flex-column {
+ display: flex;
+ flex-direction: column;
+}
\ No newline at end of file
diff --git a/doc-template/public/main.js b/doc-template/public/main.js
new file mode 100644
index 0000000..b0feb6f
--- /dev/null
+++ b/doc-template/public/main.js
@@ -0,0 +1,10 @@
+export default {
+ defaultTheme: 'dark',
+ iconLinks: [
+ {
+ icon: 'github',
+ href: 'https://github.com/awaescher/OllamaSharp',
+ title: 'GitHub'
+ }
+ ]
+}
\ No newline at end of file
diff --git a/docfx.json b/docfx.json
new file mode 100644
index 0000000..b1cf83d
--- /dev/null
+++ b/docfx.json
@@ -0,0 +1,52 @@
+{
+ "metadata": [
+ {
+ "src": [
+ {
+ "src": "./src",
+ "files": [
+ "**/*.csproj"
+ ]
+ }
+ ],
+ "dest": "api"
+ }
+ ],
+ "build": {
+ "content": [
+ {
+ "files": [
+ "**/*.{md,yml}"
+ ],
+ "exclude": [
+ "_site/**"
+ ]
+ }
+ ],
+ "markdownEngineProperties": {
+ "markdigExtensions": [
+ "CustomContainers"
+ ]
+ },
+ "resource": [
+ {
+ "files": [
+ "images/**"
+ ]
+ }
+ ],
+ "output": "_site",
+ "template": [
+ "default",
+ "modern",
+ "doc-template"
+ ],
+ "globalMetadata": {
+ "_appName": "OllamaSharp",
+ "_appTitle": "OllamaSharp",
+ "_appLogoPath": "images/logo-sharp-nobg.png",
+ "_enableSearch": true,
+ "pdf": true
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..d927a06
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,74 @@
+# Getting Started
+
+The [OllamaSharp](https://github.com/awaescher/OllamaSharp) library provides complete
+coverage of the [Ollama](https://ollama.com/) API through simple, asynchronous
+streaming interfaces. The library further adds convenience classes and functions
+to simplify common use cases.
+
+Getting started with OllamaSharp only requires a running Ollama server and a
+supported version of [.NET](https://dotnet.microsoft.com/en-us/download).
+
+## Prerequisites
+
+- [Ollama](https://ollama.com/)
+- [.NET](https://dotnet.microsoft.com/en-us/download)
+
+## Pulling Your First Model
+
+You can't talk to Ollama without a model. To get started, you can pull a model
+from the Ollama repository. The following code snippet demonstrates how to
+connect to an Ollama server and pull a model.
+
+```csharp
+using OllamaSharp;
+
+// If you are running Ollama locally on the default port:
+var uri = new Uri("http://localhost:11434");
+var ollama = new OllamaApiClient(uri);
+
+// Pull the model, and print the status of the pull operation.
+await foreach (var status in ollama.PullModelAsync("llama3.2-vision"))
+ Console.WriteLine($"{status.Percent}% {status.Status}");
+
+Console.WriteLine("Model pulled successfully.");
+```
+
+If everything goes well, you should see something like the following output:
+
+```
+100% pulling manifest
+100% pulling 11f274007f09
+100% pulling ece5e659647a
+100% pulling 715415638c9c
+100% pulling 0b4284c1f870
+100% pulling fefc914e46e6
+100% pulling fbd313562bb7
+100% verifying sha256 digest
+100% writing manifest
+100% success
+Model pulled successfully.
+```
+
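+To double-check the pull, you can list the models that are now available
+locally (a quick sanity check, using the same `ollama` client as above):
+
+```csharp
+var models = await ollama.ListLocalModelsAsync();
+
+foreach (var model in models)
+    Console.WriteLine(model.Name);
+```
+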
+## Getting Serenaded by Llamas
+
+Once you have a model, you can start conversing with Ollama. The following code
+snippet demonstrates how to connect to an Ollama server, load a model, and start
+a conversation.
+
+
+```csharp
+using OllamaSharp;
+
+var uri = new Uri("http://localhost:11434");
+var model = "llama3.2-vision";
+
+var ollama = new OllamaApiClient(uri, model);
+
+var request = "Write a deep, beautiful song for me about AI and the future.";
+
+await foreach (var stream in ollama.GenerateAsync(request))
+ Console.Write(stream.Response);
+```
+
+If all went to plan, you should be swept off your feet by the smooth, dulcet tones
+of the Ollama AI.
\ No newline at end of file
diff --git a/docs/introduction.md b/docs/introduction.md
new file mode 100644
index 0000000..fe95d9e
--- /dev/null
+++ b/docs/introduction.md
@@ -0,0 +1,13 @@
+# Introduction
+
+[Ollama](https://ollama.com/) is a [Go](https://go.dev/)-based, open-source
+server for interacting with local Large Language Models using Georgi Gerganov's
+[llama.cpp](https://github.com/ggerganov/llama.cpp) library. Ollama provides
+first-class support for various models, including [llama3.2](https://ollama.com/library/llama3.2),
+[phi3.5](https://ollama.com/library/phi3.5), [mistral](https://ollama.com/library/mistral),
+and many more. It provides support for pulling, running, creating, pushing, and interacting
+with models.
+
+The [OllamaSharp](https://github.com/awaescher/OllamaSharp) library provides
+complete coverage of the Ollama API through simple, asynchronous streaming interfaces.
+The library further adds convenience classes and functions to simplify common use cases.
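+
+As a minimal sketch of what that looks like (assuming a local Ollama instance
+on the default port):
+
+```csharp
+using OllamaSharp;
+
+var ollama = new OllamaApiClient(new Uri("http://localhost:11434"), "llama3.2");
+
+await foreach (var stream in ollama.GenerateAsync("Why is the sky blue?"))
+    Console.Write(stream.Response);
+```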
diff --git a/docs/toc.yml b/docs/toc.yml
new file mode 100644
index 0000000..d7e9ea8
--- /dev/null
+++ b/docs/toc.yml
@@ -0,0 +1,4 @@
+- name: Introduction
+ href: introduction.md
+- name: Getting Started
+ href: getting-started.md
\ No newline at end of file
diff --git a/images/dotnet@0.1x.png b/images/dotnet@0.1x.png
new file mode 100644
index 0000000..1420f88
Binary files /dev/null and b/images/dotnet@0.1x.png differ
diff --git a/images/logo-sharp-nobg.png b/images/logo-sharp-nobg.png
new file mode 100644
index 0000000..0fb7cec
Binary files /dev/null and b/images/logo-sharp-nobg.png differ
diff --git a/images/logo.png b/images/logo.png
new file mode 100644
index 0000000..85dd87b
Binary files /dev/null and b/images/logo.png differ
diff --git a/images/logo@0.1x.png b/images/logo@0.1x.png
new file mode 100644
index 0000000..eb7374f
Binary files /dev/null and b/images/logo@0.1x.png differ
diff --git a/index.md b/index.md
new file mode 100644
index 0000000..9ce2634
--- /dev/null
+++ b/index.md
@@ -0,0 +1,46 @@
+---
+_layout: landing
+---
+
+::::flex-row
+
+:::col
+
+![Ollama Logo](images/logo@0.1x.png) ➕ ![.NET Logo](images/dotnet@0.1x.png)
+
+# Build AI-powered applications with Ollama and .NET 🦙
+
+*OllamaSharp* provides .NET bindings for the [Ollama API](https://github.com/jmorganca/ollama/blob/main/docs/api.md),
+simplifying interactions with Ollama both locally and remotely.
+
+Provides support for interacting with Ollama directly, or through the [Microsoft.Extensions.AI](https://devblogs.microsoft.com/dotnet/introducing-microsoft-extensions-ai-preview/)
+and [Microsoft Semantic Kernel](https://github.com/microsoft/semantic-kernel/pull/7362) libraries.
+:::
+:::col
+
+### Add OllamaSharp to your project
+```bash
+dotnet add package OllamaSharp
+```
+
+### Start talking to Ollama
+```csharp
+using OllamaSharp;
+
+var uri = new Uri("http://localhost:11434");
+var ollama = new OllamaApiClient(uri, "llama3.2");
+var chat = new Chat(ollama);
+
+Console.WriteLine("You're now talking with Ollama. Hit Ctrl+C to exit.");
+
+while(true)
+{
+ Console.Write("You: ");
+ var input = Console.ReadLine();
+ var response = await chat.SendAsync(input).StreamToEndAsync();
+ Console.WriteLine($"Ollama: {response}");
+}
+```
+:::
+
+::::
\ No newline at end of file
diff --git a/src/AsyncEnumerableExtensions/ChatResponseStreamAppender.cs b/src/AsyncEnumerableExtensions/ChatResponseStreamAppender.cs
index af25c7e..9c76d20 100644
--- a/src/AsyncEnumerableExtensions/ChatResponseStreamAppender.cs
+++ b/src/AsyncEnumerableExtensions/ChatResponseStreamAppender.cs
@@ -4,15 +4,16 @@
namespace OllamaSharp.AsyncEnumerableExtensions;
/// <summary>
-/// Appender to stream IAsyncEnumerable(ChatResponseStream) to build up one single ChatDoneResponseStream object
+/// Appender to stream <see cref="IAsyncEnumerable{ChatResponseStream}"/> to
+/// build up one single <see cref="ChatDoneResponseStream"/> object
/// </summary>
-public class ChatResponseStreamAppender : IAppender<ChatResponseStream, ChatDoneResponseStream>
+internal class ChatResponseStreamAppender : IAppender<ChatResponseStream, ChatDoneResponseStream>
{
private readonly MessageBuilder _messageBuilder = new();
private ChatDoneResponseStream? _lastItem;
/// <summary>
- /// Appends a given ChatResponseStream item to build a single return object
+ /// Appends a given <see cref="ChatResponseStream"/> item to build a single return object
/// </summary>
/// <param name="item">The item to append</param>
public void Append(ChatResponseStream? item)
@@ -24,8 +25,10 @@ public void Append(ChatResponseStream? item)
}
/// <summary>
- /// Builds up one single ChatDoneResponseStream object from the previously streamed ChatResponseStream items
+ /// Builds up one single <see cref="ChatDoneResponseStream"/> object from the
+ /// previously streamed <see cref="ChatResponseStream"/> items
/// </summary>
+ /// <returns>The completed consolidated object</returns>
public ChatDoneResponseStream? Complete()
{
if (_lastItem is null)
diff --git a/src/AsyncEnumerableExtensions/GenerateResponseStreamAppender.cs b/src/AsyncEnumerableExtensions/GenerateResponseStreamAppender.cs
index 4ee4694..11e1649 100644
--- a/src/AsyncEnumerableExtensions/GenerateResponseStreamAppender.cs
+++ b/src/AsyncEnumerableExtensions/GenerateResponseStreamAppender.cs
@@ -5,15 +5,16 @@
namespace OllamaSharp.AsyncEnumerableExtensions;
/// <summary>
-/// Appender to stream IAsyncEnumerable(GenerateResponseStream) to build up one single GenerateDoneResponseStream object
+/// Appender to stream <see cref="IAsyncEnumerable{GenerateResponseStream}"/>
+/// to build up one single <see cref="GenerateDoneResponseStream"/> object
/// </summary>
-public class GenerateResponseStreamAppender : IAppender<GenerateResponseStream, GenerateDoneResponseStream>
+internal class GenerateResponseStreamAppender : IAppender<GenerateResponseStream, GenerateDoneResponseStream>
{
private readonly StringBuilder _builder = new();
private GenerateDoneResponseStream? _lastItem;
/// <summary>
- /// Appends a given GenerateResponseStream item to build a single return object
+ /// Appends a given <see cref="GenerateResponseStream"/> item to build a single return object
/// </summary>
/// <param name="item">The item to append</param>
public void Append(GenerateResponseStream? item)
@@ -25,8 +26,10 @@ public void Append(GenerateResponseStream? item)
}
/// <summary>
- /// Builds up one single GenerateDoneResponseStream object from the previously streamed GenerateResponseStream items
+ /// Builds up one single <see cref="GenerateDoneResponseStream"/> object
+ /// from the previously streamed <see cref="GenerateResponseStream"/> items
/// </summary>
+ /// <returns>The completed, consolidated object</returns>
public GenerateDoneResponseStream? Complete()
{
if (_lastItem is null)
diff --git a/src/AsyncEnumerableExtensions/IAppender.cs b/src/AsyncEnumerableExtensions/IAppender.cs
index db819b1..99f9ae8 100644
--- a/src/AsyncEnumerableExtensions/IAppender.cs
+++ b/src/AsyncEnumerableExtensions/IAppender.cs
@@ -5,7 +5,7 @@ namespace OllamaSharp.AsyncEnumerableExtensions;
/// </summary>
/// <typeparam name="TItem">The type of the items of the IAsyncEnumerable</typeparam>
/// <typeparam name="TReturn">The return type after the IAsyncEnumerable was streamed to the end</typeparam>
-public interface IAppender<TItem, TReturn>
+internal interface IAppender<TItem, TReturn>
{
/// <summary>
/// Appends an item to build up the return value
diff --git a/src/AsyncEnumerableExtensions/IAsyncEnumerableExtensions.cs b/src/AsyncEnumerableExtensions/IAsyncEnumerableExtensions.cs
index cf118f3..3a93901 100644
--- a/src/AsyncEnumerableExtensions/IAsyncEnumerableExtensions.cs
+++ b/src/AsyncEnumerableExtensions/IAsyncEnumerableExtensions.cs
@@ -19,7 +19,7 @@ public static partial class IAsyncEnumerableExtensions
/// </summary>
/// <param name="stream">The IAsyncEnumerable to stream</param>
/// <param name="itemCallback">An optional callback to additionally process every single item from the IAsyncEnumerable</param>
- /// <returns>A single response stream appened from every IAsyncEnumerable item</returns>
+ /// <returns>A single response stream appended from every IAsyncEnumerable item</returns>
public static Task<string> StreamToEndAsync(this IAsyncEnumerable<string> stream, Action<string>? itemCallback = null)
=> stream.StreamToEndAsync(new StringAppender(), itemCallback);
@@ -48,7 +48,7 @@ public static Task StreamToEndAsync(this IAsyncEnumerable stream
/// <param name="appender">The appender instance used to build up one single response value</param>
/// <param name="itemCallback">An optional callback to additionally process every single item from the IAsyncEnumerable</param>
/// <returns>A single ChatDoneResponseStream built up from every single IAsyncEnumerable item</returns>
- public static async Task<TReturn> StreamToEndAsync<TItem, TReturn>(this IAsyncEnumerable<TItem> stream, IAppender<TItem, TReturn> appender, Action<TItem>? itemCallback = null)
+ internal static async Task<TReturn> StreamToEndAsync<TItem, TReturn>(this IAsyncEnumerable<TItem> stream, IAppender<TItem, TReturn> appender, Action<TItem>? itemCallback = null)
{
await foreach (var item in stream.ConfigureAwait(false))
{
diff --git a/src/AsyncEnumerableExtensions/StringAppender.cs b/src/AsyncEnumerableExtensions/StringAppender.cs
index abd99ed..6b3dd1d 100644
--- a/src/AsyncEnumerableExtensions/StringAppender.cs
+++ b/src/AsyncEnumerableExtensions/StringAppender.cs
@@ -3,9 +3,9 @@
namespace OllamaSharp.AsyncEnumerableExtensions;
/// <summary>
-/// Appender to stream IAsyncEnumerable(string) to build up one single result string
+/// Appender to stream <see cref="IAsyncEnumerable{String}"/> to build up one single result string
/// </summary>
-public class StringAppender : IAppender<string, string>
+internal class StringAppender : IAppender<string, string>
{
private readonly StringBuilder _builder = new();
diff --git a/src/ByteArrayExtensions.cs b/src/ByteArrayExtensions.cs
index ce11a7e..ffea185 100644
--- a/src/ByteArrayExtensions.cs
+++ b/src/ByteArrayExtensions.cs
@@ -7,17 +7,20 @@ namespace OllamaSharp;
/// <summary>
/// Extensions for byte arrays
/// </summary>
-public static class ByteArrayExtensions
+internal static class ByteArrayExtensions
{
/// <summary>
- /// Converts a series of bytes to a base64 string
+ /// Converts a sequence of bytes to its equivalent string representation encoded in base-64.
/// </summary>
- /// <param name="bytes">The bytes to convert to base64</param>
- public static string ToBase64(this IEnumerable<byte>? bytes) => Convert.ToBase64String(bytes.ToArray());
+ /// <param name="bytes">The sequence of bytes to convert to a base-64 string.</param>
+ /// <returns>A base-64 encoded string representation of the input byte sequence.</returns>
+ public static string ToBase64(this IEnumerable<byte> bytes) => Convert.ToBase64String(bytes.ToArray());
+
/// <summary>
- /// Converts multiple series of bytes to multiple base64 strings, one for each.
+ /// Converts a collection of byte arrays to a collection of base64 strings.
/// </summary>
- /// <param name="byteArrays">The series of bytes to convert to base64</param>
- public static IEnumerable<string>? ToBase64(this IEnumerable<IEnumerable<byte>>? byteArrays) => byteArrays?.Select(ToBase64);
+ /// <param name="byteArrays">The collection of byte arrays to convert to base64 strings.</param>
+ /// <returns>A collection of base64 strings, or null if the input is null.</returns>
+ public static IEnumerable<string>? ToBase64(this IEnumerable<IEnumerable<byte>>? byteArrays) => byteArrays?.Select(bytes => bytes.ToBase64());
}
\ No newline at end of file
diff --git a/src/Chat.cs b/src/Chat.cs
index 0d0e0e2..d788766 100644
--- a/src/Chat.cs
+++ b/src/Chat.cs
@@ -12,6 +12,31 @@ namespace OllamaSharp;
/// <summary>
/// A chat helper that handles the chat logic internally and
/// automatically extends the message history.
+///
+/// <example>
+/// A simple interactive chat can be implemented in just a handful of lines:
+/// <code>
+/// var ollama = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest");
+/// var chat = new Chat(ollama);
+/// // ...
+/// while (true)
+/// {
+/// 	Console.Write("You: ");
+/// 	var message = Console.ReadLine()!;
+/// 	Console.Write("Ollama: ");
+/// 	await foreach (var answerToken in chat.SendAsync(message))
+/// 		Console.Write(answerToken);
+/// 	// ...
+/// 	Console.WriteLine();
+/// }
+/// // ...
+/// // Output:
+/// // You: Write a haiku about AI models
+/// // Ollama: Code whispers secrets
+/// // Intelligent designs unfold
+/// // Minds beyond our own
+/// </code>
+/// </example>
/// </summary>
public class Chat
{
@@ -40,7 +65,17 @@ public class Chat
/// </summary>
/// <param name="client">The Ollama client to use for the chat</param>
/// <param name="systemPrompt">An optional system prompt to define the behavior of the chat assistant</param>
- /// <exception cref="ArgumentNullException"></exception>
+ /// <remarks>
+ /// If the client is null, an <see cref="ArgumentNullException"/> is thrown.
+ /// </remarks>
+ /// <example>
+ /// Setting up a chat with a system prompt:
+ /// <code>
+ /// var client = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest");
+ /// var prompt = "You are a helpful assistant that will answer any question you are asked.";
+ /// var chat = new Chat(client, prompt);
+ /// </code>
+ /// </example>
public Chat(IOllamaApiClient client, string systemPrompt = "")
{
Client = client ?? throw new ArgumentNullException(nameof(client));
@@ -55,6 +90,15 @@ public Chat(IOllamaApiClient client, string systemPrompt = "")
/// </summary>
/// <param name="message">The message to send</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
+ /// <returns>An <see cref="IAsyncEnumerable{String}"/> that streams the response.</returns>
+ /// <example>
+ /// Getting a response from the model:
+ /// <code>
+ /// var response = chat.SendAsync("Write a haiku about AI models");
+ /// await foreach (var answerToken in response)
+ /// 	Console.WriteLine(answerToken);
+ /// </code>
+ /// </example>
public IAsyncEnumerable<string> SendAsync(string message, CancellationToken cancellationToken = default)
=> SendAsync(message, tools: null, imagesAsBase64: null, cancellationToken);
@@ -64,7 +108,25 @@ public IAsyncEnumerable<string> SendAsync(string message, CancellationToken cancellationToken = default)
/// <param name="message">The message to send</param>
/// <param name="imagesAsBytes">Images in byte representation to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<IEnumerable<byte>> imagesAsBytes, CancellationToken cancellationToken = default)
+ /// <returns>An <see cref="IAsyncEnumerable{String}"/> that streams the response.</returns>
+ /// <example>
+ /// Getting a response from the model with an image:
+ /// <code>
+ /// var client = new HttpClient();
+ /// var cat = await client.GetByteArrayAsync("https://cataas.com/cat");
+ /// var ollama = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest");
+ /// var chat = new Chat(ollama);
+ /// var response = chat.SendAsync("What do you see?", [cat]);
+ /// await foreach (var answerToken in response) Console.Write(answerToken);
+ ///
+ /// // Output: The image shows a white kitten with black markings on its
+ /// // head and tail, sitting next to an orange tabby cat. The kitten
+ /// // is looking at the camera while the tabby cat appears to be
+ /// // sleeping or resting with its eyes closed. The two cats are
+ /// // lying in a blanket that has been rumpled up.
+ /// </code>
+ /// </example>
+ public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<IEnumerable<byte>>? imagesAsBytes, CancellationToken cancellationToken = default)
=> SendAsync(message, imagesAsBytes?.ToBase64() ?? [], cancellationToken);
/// <summary>
@@ -73,7 +135,26 @@ public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<IEnumerable<byte>>? imagesAsBytes, CancellationToken cancellationToken = default)
/// <param name="message">The message to send</param>
/// <param name="imagesAsBase64">Base64 encoded images to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<string> imagesAsBase64, CancellationToken cancellationToken = default)
+ /// <returns>An <see cref="IAsyncEnumerable{String}"/> that streams the response.</returns>
+ /// <example>
+ /// Getting a response from the model with an image:
+ /// <code>
+ /// var client = new HttpClient();
+ /// var cat = await client.GetByteArrayAsync("https://cataas.com/cat");
+ /// var base64Cat = Convert.ToBase64String(cat);
+ /// var ollama = new OllamaApiClient("http://localhost:11434", "llama3.2-vision:latest");
+ /// var chat = new Chat(ollama);
+ /// var response = chat.SendAsync("What do you see?", [base64Cat]);
+ /// await foreach (var answerToken in response) Console.Write(answerToken);
+ ///
+ /// // Output:
+ /// // The image shows a cat lying on the floor next to an iPad. The cat is looking
+ /// // at the screen, which displays a game with fish and other sea creatures. The
+ /// // cat's paw is touching the screen, as if it is playing the game. The background
+ /// // of the image is a wooden floor.
+ /// </code>
+ /// </example>
+ public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<string>? imagesAsBase64, CancellationToken cancellationToken = default)
=> SendAsync(message, [], imagesAsBase64, cancellationToken);
/// <summary>
@@ -83,7 +164,7 @@ public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<string>? imagesAsBase64, CancellationToken cancellationToken = default)
/// <param name="tools">Tools that the model can make use of, see https://ollama.com/blog/tool-support. By using tools, response streaming is automatically turned off</param>
/// <param name="imagesAsBase64">Base64 encoded images to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public IAsyncEnumerable<string> SendAsync(string message, IEnumerable<Tool>? tools, IEnumerable<string>? imagesAsBase64 = default, CancellationToken cancellationToken = default)
+ public IAsyncEnumerable<string> SendAsync(string message, IReadOnlyCollection<Tool>? tools, IEnumerable<string>? imagesAsBase64 = default, CancellationToken cancellationToken = default)
=> SendAsAsync(ChatRole.User, message, tools, imagesAsBase64, cancellationToken);
/// <summary>
@@ -102,7 +183,7 @@ public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, CancellationToken cancellationToken = default)
/// <param name="message">The message to send</param>
/// <param name="imagesAsBytes">Images in byte representation to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<IEnumerable<byte>> imagesAsBytes, CancellationToken cancellationToken = default)
+ public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<IEnumerable<byte>>? imagesAsBytes, CancellationToken cancellationToken = default)
=> SendAsAsync(role, message, imagesAsBytes?.ToBase64() ?? [], cancellationToken);
/// <summary>
@@ -112,7 +193,7 @@ public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<IEnumerable<byte>>? imagesAsBytes, CancellationToken cancellationToken = default)
/// <param name="message">The message to send</param>
/// <param name="imagesAsBase64">Base64 encoded images to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<string> imagesAsBase64, CancellationToken cancellationToken = default)
+ public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<string>? imagesAsBase64, CancellationToken cancellationToken = default)
=> SendAsAsync(role, message, [], imagesAsBase64, cancellationToken);
/// <summary>
@@ -123,7 +204,7 @@ public IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<string>? imagesAsBase64, CancellationToken cancellationToken = default)
/// <param name="message">The message to send</param>
/// <param name="tools">Tools that the model can make use of, see https://ollama.com/blog/tool-support. By using tools, response streaming is automatically turned off</param>
/// <param name="imagesAsBase64">Base64 encoded images to send to the model</param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
- public async IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IEnumerable<Tool>? tools, IEnumerable<string>? imagesAsBase64 = default, [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ public async IAsyncEnumerable<string> SendAsAsync(ChatRole role, string message, IReadOnlyCollection<Tool>? tools, IEnumerable<string>? imagesAsBase64 = default, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
Messages.Add(new Message(role, message, imagesAsBase64?.ToArray()));
diff --git a/src/HttpRequestMessageExtensions.cs b/src/HttpRequestMessageExtensions.cs
index e8f2163..264721d 100644
--- a/src/HttpRequestMessageExtensions.cs
+++ b/src/HttpRequestMessageExtensions.cs
@@ -1,20 +1,21 @@
using System.Collections.Generic;
using System.Net.Http;
+using System.Net.Http.Headers;
using OllamaSharp.Models;
namespace OllamaSharp;
/// <summary>
-/// Extension methods for the http request message
+/// Provides extension methods for the <see cref="HttpRequestMessage"/> class.
/// </summary>
-public static class HttpRequestMessageExtensions
+internal static class HttpRequestMessageExtensions
{
/// <summary>
- /// Applies default headers from the OllamaApiClient and optional Ollama requests
+ /// Applies custom headers to the <see cref="HttpRequestMessage"/> instance.
/// </summary>
- /// <param name="requestMessage">The http request message to set the headers on</param>
- /// <param name="headers">The headers to set on the request message</param>
- /// <param name="ollamaRequest">The request to the Ollama API to get the custom headers from</param>
+ /// <param name="requestMessage">The <see cref="HttpRequestMessage"/> to set the headers on.</param>
+ /// <param name="headers">A dictionary containing the headers to set on the request message.</param>
+ /// <param name="ollamaRequest">An optional <see cref="OllamaRequest"/> to get additional custom headers from.</param>
public static void ApplyCustomHeaders(this HttpRequestMessage requestMessage, Dictionary<string, string> headers, OllamaRequest? ollamaRequest)
{
foreach (var header in headers)
@@ -27,7 +28,13 @@ public static void ApplyCustomHeaders(this HttpRequestMessage requestMessage, Di
}
}
- private static void AddOrUpdateHeaderValue(System.Net.Http.Headers.HttpRequestHeaders requestMessageHeaders, string headerKey, string headerValue)
+ /// <summary>
+ /// Adds or updates a header value in the <see cref="HttpRequestHeaders"/> collection.
+ /// </summary>
+ /// <param name="requestMessageHeaders">The <see cref="HttpRequestHeaders"/> collection to update.</param>
+ /// <param name="headerKey">The key of the header to add or update.</param>
+ /// <param name="headerValue">The value of the header to add or update.</param>
+ private static void AddOrUpdateHeaderValue(HttpRequestHeaders requestMessageHeaders, string headerKey, string headerValue)
{
if (requestMessageHeaders.Contains(headerKey))
requestMessageHeaders.Remove(headerKey);
diff --git a/src/IOllamaApiClient.cs b/src/IOllamaApiClient.cs
index 8f2ddf3..b4ce870 100644
--- a/src/IOllamaApiClient.cs
+++ b/src/IOllamaApiClient.cs
@@ -15,121 +15,125 @@ namespace OllamaSharp;
/// </summary>
public interface IOllamaApiClient
{
- /// <summary>
- /// Gets the endpoint uri used by the api client
- /// </summary>
- public Uri Uri { get; }
-
- /// <summary>
- /// Gets or sets the name of the model to run requests on.
- /// </summary>
- string SelectedModel { get; set; }
-
- /// <summary>
- /// Sends a request to the /api/chat endpoint and streams the response of the chat.
- /// To implement a fully interactive chat, you should make use of the Chat class with "new Chat(...)"
- /// </summary>
- /// <param name="request">The request to send to Ollama</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- /// <returns>
- /// An asynchronous enumerable that yields ChatResponseStream. Each item
- /// represents a message in the chat response stream. Returns null when the
- /// stream is completed.
- /// </returns>
- /// <remarks>
- /// This is the method to call the Ollama endpoint /api/chat. You might not want to do this manually.
- /// To implement a fully interactive chat, you should make use of the Chat class with "new Chat(...)"
- /// </remarks>
- IAsyncEnumerable<ChatResponseStream?> ChatAsync(ChatRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/copy endpoint to copy a model
- /// </summary>
- /// <param name="request">The parameters required to copy a model</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task CopyModelAsync(CopyModelRequest request, CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/create endpoint to create a model
- /// </summary>
- /// <param name="request">The request object containing the model details</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- /// <returns>An asynchronous enumerable of the model creation status</returns>
- IAsyncEnumerable<CreateModelResponse?> CreateModelAsync(CreateModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/delete endpoint to delete a model
- /// </summary>
- /// <param name="request">The request containing the model to delete</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task DeleteModelAsync(DeleteModelRequest request, CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/embed endpoint to generate embeddings
- /// </summary>
- /// <param name="request">The parameters to generate embeddings for</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task<EmbedResponse> EmbedAsync(EmbedRequest request, CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/tags endpoint to get all models that are available locally
- /// </summary>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task<IEnumerable<Model>> ListLocalModelsAsync(CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/ps endpoint to get the running models
- /// </summary>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task<IEnumerable<RunningModel>> ListRunningModelsAsync(CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/pull endpoint to pull a new model
- /// </summary>
- /// <param name="request">The request specifying the model name and whether to use insecure connection</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- /// <returns>
- /// Async enumerable of PullStatus objects representing the status of the
- /// model pull operation
- /// </returns>
- IAsyncEnumerable<PullModelResponse?> PullModelAsync(PullModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Pushes a model to the Ollama API endpoint.
- /// </summary>
- /// <param name="request">The request containing the model information to push.</param>
- /// <param name="cancellationToken">The token to cancel the operation with.</param>
- /// <returns>
- /// An asynchronous enumerable of push status updates. Use the enumerator
- /// to retrieve the push status updates.
- /// </returns>
- IAsyncEnumerable<PushModelResponse?> PushModelAsync(PushModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a request to the /api/show endpoint to show the information of a model
- /// </summary>
- /// <param name="request">The request containing the name of the model the get the information for</param>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- /// <returns>The model information</returns>
- Task<ShowModelResponse> ShowModelAsync(ShowModelRequest request, CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Streams completion responses from the /api/generate endpoint on the Ollama API based on the provided request.
- /// </summary>
- /// <param name="request">The request containing the parameters for the completion.</param>
- /// <param name="cancellationToken">The token to cancel the operation with.</param>
- /// <returns>An asynchronous enumerable of completion response streams.</returns>
- IAsyncEnumerable<GenerateResponseStream?> GenerateAsync(GenerateRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Sends a query to check whether the Ollama api is running or not
- /// </summary>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task<bool> IsRunningAsync(CancellationToken cancellationToken = default);
-
- /// <summary>
- /// Get the version of Ollama
- /// </summary>
- /// <param name="cancellationToken">The token to cancel the operation with</param>
- Task<Version> GetVersionAsync(CancellationToken cancellationToken = default);
+ /// <summary>
+ /// Gets the endpoint URI used by the API client.
+ /// </summary>
+ Uri Uri { get; }
+
+ /// <summary>
+ /// Gets or sets the name of the model to run requests on.
+ /// </summary>
+ string SelectedModel { get; set; }
+
+ /// <summary>
+ /// Sends a request to the /api/chat endpoint and streams the response of the chat.
+ /// </summary>
+ /// <param name="request">The request to send to Ollama.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>
+ /// An asynchronous enumerable that yields <see cref="ChatResponseStream"/>. Each item
+ /// represents a message in the chat response stream. Returns null when the
+ /// stream is completed.
+ /// </returns>
+ /// <remarks>
+ /// This is the method to call the Ollama endpoint /api/chat. You might not want to do this manually.
+ /// To implement a fully interactive chat, you should make use of the Chat class with "new Chat(...)"
+ /// </remarks>
+ IAsyncEnumerable<ChatResponseStream?> ChatAsync(ChatRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/copy endpoint to copy a model.
+ /// </summary>
+ /// <param name="request">The parameters required to copy a model.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ Task CopyModelAsync(CopyModelRequest request, CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/create endpoint to create a model.
+ /// </summary>
+ /// <param name="request">The request object containing the model details.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An asynchronous enumerable of the model creation status.</returns>
+ IAsyncEnumerable<CreateModelResponse?> CreateModelAsync(CreateModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/delete endpoint to delete a model.
+ /// </summary>
+ /// <param name="request">The request containing the model to delete.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ Task DeleteModelAsync(DeleteModelRequest request, CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/embed endpoint to generate embeddings.
+ /// </summary>
+ /// <param name="request">The parameters to generate embeddings for.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="EmbedResponse"/>.</returns>
+ Task<EmbedResponse> EmbedAsync(EmbedRequest request, CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/tags endpoint to get all models that are available locally.
+ /// </summary>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains a collection of <see cref="Model"/>.</returns>
+ Task<IEnumerable<Model>> ListLocalModelsAsync(CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/ps endpoint to get the running models.
+ /// </summary>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains a collection of <see cref="RunningModel"/>.</returns>
+ Task<IEnumerable<RunningModel>> ListRunningModelsAsync(CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/pull endpoint to pull a new model.
+ /// </summary>
+ /// <param name="request">The request specifying the model name and whether to use an insecure connection.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>
+ /// An asynchronous enumerable of <see cref="PullModelResponse"/> objects representing the status of the
+ /// model pull operation.
+ /// </returns>
+ IAsyncEnumerable<PullModelResponse?> PullModelAsync(PullModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Pushes a model to the Ollama API endpoint.
+ /// </summary>
+ /// <param name="request">The request containing the model information to push.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>
+ /// An asynchronous enumerable of push status updates. Use the enumerator
+ /// to retrieve the push status updates.
+ /// </returns>
+ IAsyncEnumerable<PushModelResponse?> PushModelAsync(PushModelRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a request to the /api/show endpoint to show the information of a model.
+ /// </summary>
+ /// <param name="request">The request containing the name of the model to get the information for.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="ShowModelResponse"/>.</returns>
+ Task<ShowModelResponse> ShowModelAsync(ShowModelRequest request, CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Streams completion responses from the /api/generate endpoint on the Ollama API based on the provided request.
+ /// </summary>
+ /// <param name="request">The request containing the parameters for the completion.</param>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An asynchronous enumerable of <see cref="GenerateResponseStream"/>.</returns>
+ IAsyncEnumerable<GenerateResponseStream?> GenerateAsync(GenerateRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Sends a query to check whether the Ollama API is running or not.
+ /// </summary>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains a boolean indicating whether the API is running.</returns>
+ Task<bool> IsRunningAsync(CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Gets the version of Ollama.
+ /// </summary>
+ /// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="Version"/>.</returns>
+ Task<Version> GetVersionAsync(CancellationToken cancellationToken = default);
}
\ No newline at end of file
diff --git a/src/MicrosoftAi/AbstractionMapper.cs b/src/MicrosoftAi/AbstractionMapper.cs
index f9e1c1a..04e905e 100644
--- a/src/MicrosoftAi/AbstractionMapper.cs
+++ b/src/MicrosoftAi/AbstractionMapper.cs
@@ -12,13 +12,14 @@ namespace OllamaSharp.MicrosoftAi;
/// <summary>
/// Provides mapping functionality between OllamaSharp and Microsoft.Extensions.AI models.
/// </summary>
-public static class AbstractionMapper
+internal static class AbstractionMapper
{
/// <summary>
/// Maps a <see cref="ChatDoneResponseStream"/> and <see cref="string"/> to a <see cref="ChatCompletion"/>.
/// </summary>
/// <param name="stream">The response stream with completion data.</param>
/// <param name="usedModel">The used model. This has to be a separate argument because there might be fallbacks from the calling method.</param>
+ /// <returns>A <see cref="ChatCompletion"/> object containing the mapped data.</returns>
public static ChatCompletion? ToChatCompletion(ChatDoneResponseStream? stream, string? usedModel)
{
if (stream is null)
@@ -40,21 +41,23 @@ public static class AbstractionMapper
}
/// <summary>
- /// Converts Microsoft.Extensions.AI messages and options to an OllamaSharp chat request.
+ /// Converts Microsoft.Extensions.AI <see cref="ChatMessage"/> objects and
+ /// an <see cref="ChatOptions"/> option instance to an OllamaSharp <see cref="ChatRequest"/>.
/// </summary>
/// <param name="chatMessages">A list of chat messages.</param>
/// <param name="options">Optional chat options to configure the request.</param>
/// <param name="stream">Indicates if the request should be streamed.</param>
/// <param name="serializerOptions">Serializer options</param>
+ /// <returns>A <see cref="ChatRequest"/> object containing the converted data.</returns>
public static ChatRequest ToOllamaSharpChatRequest(IList<ChatMessage> chatMessages, ChatOptions? options, bool stream, JsonSerializerOptions serializerOptions)
{
var request = new ChatRequest
{
- Format = options?.ResponseFormat == ChatResponseFormat.Json ? "json" : null,
+ Format = Equals(options?.ResponseFormat, ChatResponseFormat.Json) ? "json" : null,
KeepAlive = null,
Messages = ToOllamaSharpMessages(chatMessages, serializerOptions),
Model = options?.ModelId ?? "", // will be set OllamaApiClient.SelectedModel if not set
- Options = new Models.RequestOptions
+ Options = new RequestOptions
{
FrequencyPenalty = options?.FrequencyPenalty,
PresencePenalty = options?.PresencePenalty,
@@ -115,7 +118,7 @@ public static ChatRequest ToOllamaSharpChatRequest(IList<ChatMessage> chatMessages, ChatOptions? options, bool stream, JsonSerializerOptions serializerOptions)
/// <param name="microsoftChatOptions">The chat options from the Microsoft abstraction</param>
/// <param name="option">The Ollama setting to add</param>
/// <param name="setter">The setter to set the Ollama option if available in the chat options</param>
- private static void TryAddOllamaOption<T>(ChatOptions microsoftChatOptions, OllamaOption option, Action<object?> setter)
/// <param name="tools">The tools to convert.</param>
+ /// <returns>An enumeration of <see cref="Tool"/> objects containing the converted data.</returns>
private static IEnumerable<Tool>? ToOllamaSharpTools(IEnumerable<AITool>? tools)
{
return tools?.Select(ToOllamaSharpTool)
@@ -133,9 +137,13 @@ private static void TryAddOllamaOption<T>(ChatOptions microsoftChatOptions, OllamaOption option, Action<object?> setter)
}
/// <summary>
- /// Converts an Microsoft.Extensions.AI.<see cref="AITool"/> to an OllamaSharp tool.
+ /// Converts a Microsoft.Extensions.AI.<see cref="AITool"/> to an OllamaSharp <see cref="Tool"/>.
/// </summary>
/// <param name="tool">The tool to convert.</param>
+ /// <returns>
+ /// If parseable, a <see cref="Tool"/> object containing the converted data,
+ /// otherwise <see langword="null"/>.
+ /// </returns>
private static Tool? ToOllamaSharpTool(AITool tool)
{
if (tool is AIFunction f)
@@ -148,7 +156,8 @@ private static void TryAddOllamaOption<T>(ChatOptions microsoftChatOptions, OllamaOption option, Action<object?> setter)
/// Converts <see cref="AIFunctionMetadata"/> to a <see cref="Tool"/>.
/// </summary>
/// <param name="functionMetadata">The function metadata to convert.</param>
- private static Tool? ToOllamaSharpTool(AIFunctionMetadata functionMetadata)
+ /// <returns>A <see cref="Tool"/> object containing the converted data.</returns>
+ private static Tool ToOllamaSharpTool(AIFunctionMetadata functionMetadata)
{
return new Tool
{
@@ -176,7 +185,8 @@ private static void TryAddOllamaOption<T>(ChatOptions microsoftChatOptions, OllamaOption option, Action<object?> setter)
/// Converts parameter schema object to a function type string.
/// </summary>
/// <param name="schema">The schema object holding schema type information.</param>
- private static IEnumerable<string>? GetPossibleValues(JsonObject? schema)
+ /// <returns>A collection of strings containing the function types.</returns>
+ private static IEnumerable<string> GetPossibleValues(JsonObject? schema)
{
return []; // TODO others supported?
}
@@ -185,6 +195,7 @@ private static void TryAddOllamaOption<T>(ChatOptions microsoftChatOptions, OllamaOption option, Action<object?> setter)
/// <summary>
/// Converts parameter schema object to a function type string.
/// </summary>
/// <param name="schema">The schema object holding schema type information.</param>
+ /// <returns>A string containing the function type.</returns>
{
return "string"; // TODO others supported?
@@ -195,6 +206,7 @@ private static string ToFunctionTypeString(JsonObject? schema)
/// </summary>
/// <param name="chatMessages">The chat messages to convert.</param>
/// <param name="serializerOptions">Serializer options</param>
+ /// <returns>An enumeration of <see cref="Message"/> objects containing the converted data.</returns>
private static IEnumerable<Message> ToOllamaSharpMessages(IList<ChatMessage> chatMessages, JsonSerializerOptions serializerOptions)
{
foreach (var cm in chatMessages)
@@ -236,7 +248,8 @@ private static IEnumerable<Message> ToOllamaSharpMessages(IList<ChatMessage> chatMessages, JsonSerializerOptions serializerOptions)
/// Converts a Microsoft.Extensions.AI.<see cref="ImageContent"/> to a base64 image string.
/// </summary>
/// <param name="content">The data content to convert.</param>
- private static string ToOllamaImage(ImageContent content)
+ /// <returns>A string containing the base64 image data.</returns>
+ private static string ToOllamaImage(ImageContent? content)
{
if (content is null)
return string.Empty;
@@ -251,6 +264,7 @@ private static string ToOllamaImage(ImageContent content)
/// Converts a Microsoft.Extensions.AI.<see cref="FunctionCallContent"/> to a <see cref="Message.ToolCall"/>.
/// </summary>
/// <param name="functionCall">The function call content to convert.</param>
+ /// <returns>A <see cref="Message.ToolCall"/> object containing the converted data.</returns>
private static Message.ToolCall ToOllamaSharpToolCall(FunctionCallContent functionCall)
{
return new Message.ToolCall
@@ -267,14 +281,15 @@ private static Message.ToolCall ToOllamaSharpToolCall(FunctionCallContent functionCall)
/// Maps a <see cref="Microsoft.Extensions.AI.ChatRole"/> to an <see cref="OllamaSharp.Models.Chat.ChatRole"/>.
/// </summary>
/// <param name="role">The chat role to map.</param>
+ /// <returns>A <see cref="Models.Chat.ChatRole"/> object containing the mapped role.</returns>
private static Models.Chat.ChatRole ToOllamaSharpRole(Microsoft.Extensions.AI.ChatRole role)
{
return role.Value switch
{
- "assistant" => OllamaSharp.Models.Chat.ChatRole.Assistant,
- "system" => OllamaSharp.Models.Chat.ChatRole.System,
- "user" => OllamaSharp.Models.Chat.ChatRole.User,
- "tool" => OllamaSharp.Models.Chat.ChatRole.Tool,
+ "assistant" => Models.Chat.ChatRole.Assistant,
+ "system" => Models.Chat.ChatRole.System,
+ "user" => Models.Chat.ChatRole.User,
+ "tool" => Models.Chat.ChatRole.Tool,
_ => new OllamaSharp.Models.Chat.ChatRole(role.Value),
};
}
@@ -283,6 +298,7 @@ private static Models.Chat.ChatRole ToOllamaSharpRole(Microsoft.Extensions.AI.ChatRole role)
/// Maps an <see cref="OllamaSharp.Models.Chat.ChatRole"/> to a <see cref="Microsoft.Extensions.AI.ChatRole"/>.
/// </summary>
/// <param name="role">The chat role to map.</param>
+ /// <returns>A <see cref="Microsoft.Extensions.AI.ChatRole"/> object containing the mapped role.</returns>
private static Microsoft.Extensions.AI.ChatRole ToAbstractionRole(OllamaSharp.Models.Chat.ChatRole? role)
{
if (role is null)
@@ -302,6 +318,7 @@ private static Microsoft.Extensions.AI.ChatRole ToAbstractionRole(OllamaSharp.Models.Chat.ChatRole? role)
/// Converts a <see cref="ChatResponseStream"/> to a <see cref="StreamingChatCompletionUpdate"/>.
/// </summary>
/// <param name="response">The response stream to convert.</param>
+ /// <returns>A <see cref="StreamingChatCompletionUpdate"/> object containing the latest chat completion chunk.</returns>
public static StreamingChatCompletionUpdate ToStreamingChatCompletionUpdate(ChatResponseStream? response)
{
return new StreamingChatCompletionUpdate
@@ -312,6 +329,7 @@ public static StreamingChatCompletionUpdate ToStreamingChatCompletionUpdate(Chat
CreatedAt = response?.CreatedAt,
FinishReason = response?.Done == true ? ChatFinishReason.Stop : null,
RawRepresentation = response,
+ // TODO: Check if "Message" can ever actually be null. If not, remove the null-coalescing operator
Text = response?.Message?.Content ?? string.Empty,
Role = ToAbstractionRole(response?.Message?.Role),
ModelId = response?.Model
@@ -322,6 +340,7 @@ public static StreamingChatCompletionUpdate ToStreamingChatCompletionUpdate(ChatResponseStream? response)
/// Converts a <see cref="Message"/> to a <see cref="ChatMessage"/>.
/// </summary>
/// <param name="message">The message to convert.</param>
+ /// <returns>A <see cref="ChatMessage"/> object containing the converted data.</returns>
public static ChatMessage ToChatMessage(Message message)
{
var contents = new List();
@@ -350,7 +369,8 @@ public static ChatMessage ToChatMessage(Message message)
/// Parses additional properties from a <see cref="ChatDoneResponseStream"/>.
/// </summary>
/// <param name="response">The response to parse.</param>
- private static AdditionalPropertiesDictionary? ParseOllamaChatResponseProps(ChatDoneResponseStream response)
+ /// <returns>An <see cref="AdditionalPropertiesDictionary"/> object containing the parsed additional properties.</returns>
+ private static AdditionalPropertiesDictionary ParseOllamaChatResponseProps(ChatDoneResponseStream response)
{
const double NANOSECONDS_PER_MILLISECOND = 1_000_000;
@@ -367,7 +387,8 @@ public static ChatMessage ToChatMessage(Message message)
/// Parses additional properties from a <see cref="EmbedResponse"/>.
/// </summary>
/// <param name="response">The response to parse.</param>
- private static AdditionalPropertiesDictionary? ParseOllamaEmbedResponseProps(EmbedResponse response)
+ /// <returns>An <see cref="AdditionalPropertiesDictionary"/> object containing the parsed additional properties.</returns>
+ private static AdditionalPropertiesDictionary ParseOllamaEmbedResponseProps(EmbedResponse response)
{
const double NANOSECONDS_PER_MILLISECOND = 1_000_000;
@@ -382,6 +403,7 @@ public static ChatMessage ToChatMessage(Message message)
/// Maps a string representation of a finish reason to a <see cref="ChatFinishReason"/>.
/// </summary>
/// <param name="ollamaDoneReason">The finish reason string.</param>
+ /// <returns>A <see cref="ChatFinishReason"/> object containing the chat finish reason.</returns>
private static ChatFinishReason? ToFinishReason(string? ollamaDoneReason)
{
return ollamaDoneReason switch
@@ -414,10 +436,11 @@ public static ChatMessage ToChatMessage(Message message)
}
/// <summary>
- /// Gets an embedding request for the Ollama API
+ /// Gets an <see cref="EmbedRequest"/> for the Ollama API.
/// </summary>
- /// <param name="values">The values to get embeddings for</param>
- /// <param name="options">The options for the embeddings</param>
+ /// <param name="values">The values to get embeddings for.</param>
+ /// <param name="options">The options for the embeddings.</param>
+ /// <returns>An <see cref="EmbedRequest"/> object containing the request data.</returns>
public static EmbedRequest ToOllamaEmbedRequest(IEnumerable<string> values, EmbeddingGenerationOptions? options)
{
var request = new EmbedRequest()
@@ -439,13 +462,15 @@ public static EmbedRequest ToOllamaEmbedRequest(IEnumerable values, Embe
}
/// <summary>
- /// Gets Microsoft GeneratedEmbeddings mapped from Ollama embeddings
+ /// Gets Microsoft GeneratedEmbeddings mapped from Ollama embeddings.
/// </summary>
- /// <param name="ollamaRequest">The original Ollama request that was used to generate the embeddings</param>
- /// <param name="ollamaResponse">The response from Ollama containing the embeddings</param>
+ /// <param name="ollamaRequest">The original Ollama request that was used to generate the embeddings.</param>
+ /// <param name="ollamaResponse">The response from Ollama containing the embeddings.</param>
/// <param name="usedModel">The used model. This has to be a separate argument because there might be fallbacks from the calling method.</param>
+ /// <returns>A <see cref="GeneratedEmbeddings{T}"/> object containing the mapped embeddings.</returns>
public static GeneratedEmbeddings<Embedding<float>> ToGeneratedEmbeddings(EmbedRequest ollamaRequest, EmbedResponse ollamaResponse, string? usedModel)
{
+ // TODO: Check if this can ever actually be null. If not, remove the null-coalescing operator
var mapped = (ollamaResponse.Embeddings ?? []).Select(vector => new Embedding<float>(vector)
{
CreatedAt = DateTimeOffset.UtcNow,
diff --git a/src/MicrosoftAi/ChatOptionsExtensions.cs b/src/MicrosoftAi/ChatOptionsExtensions.cs
index 8b50fc6..31fee77 100644
--- a/src/MicrosoftAi/ChatOptionsExtensions.cs
+++ b/src/MicrosoftAi/ChatOptionsExtensions.cs
@@ -12,11 +12,12 @@ public static class ChatOptionsExtensions
{
/// <summary>
/// Adds Ollama specific options to the additional properties of ChatOptions.
- /// These can be interpreted sent to the Ollama API by OllamaSharp.
+ /// These can be interpreted and sent to the Ollama API by OllamaSharp.
/// </summary>
/// <param name="chatOptions">The chat options to set Ollama options on</param>
/// <param name="option">The Ollama option to set, like OllamaOption.NumCtx for the option 'num_ctx'</param>
/// <param name="value">The value for the option</param>
+ /// <returns>The <see cref="ChatOptions"/> with the Ollama option set</returns>
public static ChatOptions AddOllamaOption(this ChatOptions chatOptions, OllamaOption option, object value)
{
chatOptions.AdditionalProperties ??= [];
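
For reference, a minimal usage sketch of the extension above; the option values are illustrative, and the namespaces for ChatOptions and OllamaOption are assumed from the library's public surface:

using Microsoft.Extensions.AI;
using OllamaSharp.Models;

// Ollama-specific options ride along in ChatOptions.AdditionalProperties
// and are picked up by OllamaSharp when it builds the chat request.
var chatOptions = new ChatOptions()
	.AddOllamaOption(OllamaOption.NumCtx, 4096)
	.AddOllamaOption(OllamaOption.Temperature, 0.2f);
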
diff --git a/src/MicrosoftAi/IAsyncEnumerableExtensions.cs b/src/MicrosoftAi/IAsyncEnumerableExtensions.cs
index 3cb2ff1..43bf37a 100644
--- a/src/MicrosoftAi/IAsyncEnumerableExtensions.cs
+++ b/src/MicrosoftAi/IAsyncEnumerableExtensions.cs
@@ -13,11 +13,11 @@ namespace OllamaSharp;
public static partial class IAsyncEnumerableExtensions
{
/// <summary>
- /// Streams a given IAsyncEnumerable of response chunks to its end and builds one single StreamingChatCompletionUpdate out of them.
+ /// Streams a given <see cref="IAsyncEnumerable{T}"/> of response chunks to its end and builds one single <see cref="StreamingChatCompletionUpdate"/> out of them.
/// </summary>
- /// <param name="stream">The IAsyncEnumerable to stream</param>
+ /// <param name="stream">The <see cref="IAsyncEnumerable{T}"/> to stream</param>
/// <param name="itemCallback">An optional callback to additionally process every single item from the IAsyncEnumerable</param>
- /// <returns>A single StreamingChatCompletionUpdate built up from every single IAsyncEnumerable item</returns>
+ /// <returns>A single <see cref="StreamingChatCompletionUpdate"/> built up from every single IAsyncEnumerable item</returns>
public static Task<StreamingChatCompletionUpdate?> StreamToEndAsync(this IAsyncEnumerable<StreamingChatCompletionUpdate?> stream, Action<StreamingChatCompletionUpdate?>? itemCallback = null)
=> stream.StreamToEndAsync(new MicrosoftAi.StreamingChatCompletionUpdateAppender(), itemCallback);
}
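
A usage sketch of StreamToEndAsync over the IChatClient streaming surface; the host, model, and the CompleteStreamingAsync call reflect the Microsoft.Extensions.AI preview this diff targets and are assumptions:

using Microsoft.Extensions.AI;
using OllamaSharp;

IChatClient chatClient = new OllamaApiClient(new Uri("http://localhost:11434"), "llama3.2");

// Stream all chunks to their end and receive one consolidated update back,
// printing each partial text along the way.
var consolidated = await chatClient
	.CompleteStreamingAsync([new ChatMessage(ChatRole.User, "Why is the sky blue?")])
	.StreamToEndAsync(update => Console.Write(update?.Text));
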
diff --git a/src/MicrosoftAi/OllamaFunctionResultContent.cs b/src/MicrosoftAi/OllamaFunctionResultContent.cs
index efe4853..28e941c 100644
--- a/src/MicrosoftAi/OllamaFunctionResultContent.cs
+++ b/src/MicrosoftAi/OllamaFunctionResultContent.cs
@@ -2,8 +2,21 @@ namespace OllamaSharp.MicrosoftAi;
using System.Text.Json;
+/// <summary>
+/// A holder for the result of an Ollama function call.
+/// </summary>
internal sealed class OllamaFunctionResultContent
{
+ /// <summary>
+ /// The function call ID for which this is the result.
+ /// </summary>
public string? CallId { get; set; }
+
+ /// <summary>
+ /// This element value may be <see langword="null"/> if the function returned <see langword="null"/>,
+ /// if the function was void-returning and thus had no result, or if the function call failed.
+ /// Typically, however, in order to provide meaningfully representative information to an AI service,
+ /// a human-readable representation of those conditions should be supplied.
+ /// </summary>
public JsonElement Result { get; set; }
}
\ No newline at end of file
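
Because the type is internal, callers never see it directly; a library-internal sketch of how a tool result could be wrapped (the CallId and payload values are illustrative):

using System.Text.Json;

// Wrap a function call result so it can be serialized back to the model.
var functionResult = new OllamaFunctionResultContent
{
	CallId = "call_0001", // hypothetical id issued for the tool call
	Result = JsonSerializer.SerializeToElement(new { temperature = 21.5, unit = "celsius" })
};
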
diff --git a/src/MicrosoftAi/StreamingChatCompletionUpdateAppender.cs b/src/MicrosoftAi/StreamingChatCompletionUpdateAppender.cs
index 906b44e..1ab216c 100644
--- a/src/MicrosoftAi/StreamingChatCompletionUpdateAppender.cs
+++ b/src/MicrosoftAi/StreamingChatCompletionUpdateAppender.cs
@@ -3,20 +3,22 @@
namespace OllamaSharp.MicrosoftAi;
/// <summary>
-/// Appender to stream IAsyncEnumerable(StreamingChatCompletionUpdate) to build up one single StreamingChatCompletionUpdate object
+/// Appender to stream <see cref="IAsyncEnumerable{StreamingChatCompletionUpdate}"/>
+/// to build up one consolidated <see cref="StreamingChatCompletionUpdate"/> object
/// </summary>
-public class StreamingChatCompletionUpdateAppender : IAppender<StreamingChatCompletionUpdate?, StreamingChatCompletionUpdate?>
+internal class StreamingChatCompletionUpdateAppender : IAppender<StreamingChatCompletionUpdate?, StreamingChatCompletionUpdate?>
{
private readonly StreamingChatCompletionUpdateBuilder _messageBuilder = new();
/// <summary>
- /// Appends a given StreamingChatCompletionUpdate item to build a single return object
+ /// Appends a given <see cref="StreamingChatCompletionUpdate"/> item to build a single return object
/// </summary>
/// <param name="item">The item to append</param>
public void Append(StreamingChatCompletionUpdate? item) => _messageBuilder.Append(item);
/// <summary>
- /// Builds up one single StreamingChatCompletionUpdate object from the previously streamed items
+ /// Builds up one final, single <see cref="StreamingChatCompletionUpdate"/> object from the previously streamed items
/// </summary>
+ /// <returns>The completed, consolidated <see cref="StreamingChatCompletionUpdate"/> object</returns>
public StreamingChatCompletionUpdate? Complete() => _messageBuilder.Complete();
}
\ No newline at end of file
diff --git a/src/MicrosoftAi/StreamingChatCompletionUpdateBuilder.cs b/src/MicrosoftAi/StreamingChatCompletionUpdateBuilder.cs
index bc007d1..0a37613 100644
--- a/src/MicrosoftAi/StreamingChatCompletionUpdateBuilder.cs
+++ b/src/MicrosoftAi/StreamingChatCompletionUpdateBuilder.cs
@@ -5,9 +5,9 @@
namespace OllamaSharp.MicrosoftAi;
/// <summary>
-/// A builder that can append streamed completion updates to one single completion update
+/// A builder that can append <see cref="StreamingChatCompletionUpdate"/> to one single completion update
/// </summary>
-public class StreamingChatCompletionUpdateBuilder
+internal class StreamingChatCompletionUpdateBuilder
{
private readonly StringBuilder _contentBuilder = new();
private StreamingChatCompletionUpdate? _first;
@@ -36,13 +36,16 @@ public void Append(StreamingChatCompletionUpdate? update)
//_first.Contents and .Text will be set in Complete() with values collected from each update
//_first.RawRepresentation makes no sense
+ // TODO: Check if this can ever be null. The docs imply not.
if (update.Contents is not null)
Contents.AddRange(update.Contents);
}
/// <summary>
- /// Builds the final completion update out of the streamed updates that were appended before
+ /// Builds the final consolidated <see cref="StreamingChatCompletionUpdate"/> out of the streamed
+ /// updates that were appended before
/// </summary>
+ /// <returns>The final consolidated <see cref="StreamingChatCompletionUpdate"/> object</returns>
public StreamingChatCompletionUpdate? Complete()
{
if (_first is null)
@@ -57,5 +60,6 @@ public void Append(StreamingChatCompletionUpdate? update)
/// <summary>
/// Gets or sets the list of all content elements received from completion updates
/// </summary>
+ /// <value>A <see cref="List{T}"/> of <see cref="AIContent"/> elements</value>
public List<AIContent> Contents { get; set; } = [];
}
\ No newline at end of file
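
With both types internal, consolidation happens inside the assembly; a conceptual sketch, assuming updates is an IAsyncEnumerable<StreamingChatCompletionUpdate?> already in scope:

// Feed every streamed update into the builder, then materialize
// one consolidated update; Complete() returns null if nothing was appended.
var builder = new StreamingChatCompletionUpdateBuilder();
await foreach (var update in updates)
	builder.Append(update);
var single = builder.Complete();
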
diff --git a/src/Models/CopyModel.cs b/src/Models/CopyModel.cs
index c3676b2..164b88b 100644
--- a/src/Models/CopyModel.cs
+++ b/src/Models/CopyModel.cs
@@ -3,7 +3,8 @@
namespace OllamaSharp.Models;
/// <summary>
-/// https://github.com/jmorganca/ollama/blob/main/docs/api.md#copy-a-model
+/// Copy a model. Creates a model with another name from an existing model.
+/// <a href="https://github.com/jmorganca/ollama/blob/main/docs/api.md#copy-a-model">Ollama API docs</a>
/// </summary>
public class CopyModelRequest : OllamaRequest
{
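
A usage sketch of the copy endpoint through the CopyModelAsync extension further down in this diff; the host and model names are assumptions:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
// Duplicates an existing local model under a new name.
await ollama.CopyModelAsync("llama3.2", "llama3.2-backup");
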
diff --git a/src/Models/CreateModel.cs b/src/Models/CreateModel.cs
index f40f470..4d18694 100644
--- a/src/Models/CreateModel.cs
+++ b/src/Models/CreateModel.cs
@@ -3,7 +3,14 @@
namespace OllamaSharp.Models;
/// <summary>
-/// https://github.com/jmorganca/ollama/blob/main/docs/api.md#create-a-model
+/// Create a model from a Modelfile. It is recommended to set <see cref="ModelFileContent"/> to the
+/// content of the Modelfile rather than just set path. This is a requirement
+/// for remote create. Remote model creation must also create any file blobs,
+/// fields such as FROM and ADAPTER, explicitly with the server using Create a
+/// Blob and the value to the path indicated in the response.
+/// <br/>
+/// <a href="https://github.com/jmorganca/ollama/blob/main/docs/api.md#create-a-model">Ollama API docs</a>
+///
/// </summary>
[JsonUnmappedMemberHandling(JsonUnmappedMemberHandling.Skip)]
public class CreateModelRequest : OllamaRequest
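
A usage sketch of the streaming create call via the CreateModelAsync extension shown later in this diff; the model name and Modelfile content are illustrative:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
// Each streamed response reports the current creation status.
await foreach (var status in ollama.CreateModelAsync("mario", "FROM llama3.2\nSYSTEM You are Mario from Super Mario Bros."))
	Console.WriteLine(status?.Status);
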
diff --git a/src/Models/DeleteModel.cs b/src/Models/DeleteModel.cs
index ff76be8..9d450c1 100644
--- a/src/Models/DeleteModel.cs
+++ b/src/Models/DeleteModel.cs
@@ -3,7 +3,9 @@
namespace OllamaSharp.Models;
/// <summary>
-/// https://github.com/jmorganca/ollama/blob/main/docs/api.md#delete-a-model
+/// Delete a model and its data.
+/// <br/>
+/// <a href="https://github.com/jmorganca/ollama/blob/main/docs/api.md#delete-a-model">Ollama API docs</a>
/// </summary>
[JsonUnmappedMemberHandling(JsonUnmappedMemberHandling.Skip)]
public class DeleteModelRequest : OllamaRequest
diff --git a/src/Models/Embed.cs b/src/Models/Embed.cs
index 1c39a24..2d58ca7 100644
--- a/src/Models/Embed.cs
+++ b/src/Models/Embed.cs
@@ -4,7 +4,9 @@
namespace OllamaSharp.Models;
/// <summary>
-/// https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-embeddings
+/// Generate embeddings from a model.
+/// <br/>
+/// <a href="https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-embeddings">Ollama API docs</a>
/// </summary>
public class EmbedRequest : OllamaRequest
{
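
A usage sketch of the embed endpoint via the EmbedAsync extension; the embedding model name is an assumption:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"), "all-minilm");
var result = await ollama.EmbedAsync("The sky is blue because of Rayleigh scattering.");
// EmbedResponse.Embeddings holds one vector per input value.
Console.WriteLine($"Vector length: {result.Embeddings?[0].Length}");
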
diff --git a/src/Models/Generate.cs b/src/Models/Generate.cs
index 25ea26d..d0a2c2f 100644
--- a/src/Models/Generate.cs
+++ b/src/Models/Generate.cs
@@ -4,7 +4,11 @@
namespace OllamaSharp.Models;
/// <summary>
-/// https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
+/// Generate a response for a given prompt with a provided model. This is a
+/// streaming endpoint, so there will be a series of responses. The final
+/// response object will include statistics and additional data from the request.
+/// <br/>
+/// <a href="https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion">Ollama API docs</a>
/// </summary>
public class GenerateRequest : OllamaRequest
{
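
A usage sketch of the streaming generate endpoint via the GenerateAsync extension; the host and model are assumptions:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"), "llama3.2");
// Partial responses arrive until the final chunk carries the statistics.
await foreach (var stream in ollama.GenerateAsync("Why is the sky blue?"))
	Console.Write(stream?.Response);
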
diff --git a/src/Models/ListModels.cs b/src/Models/ListModels.cs
index 8b7ddaa..72a888b 100644
--- a/src/Models/ListModels.cs
+++ b/src/Models/ListModels.cs
@@ -5,8 +5,9 @@
namespace OllamaSharp.Models;
/// <summary>
-/// Represents the response from the API call to list local models.
-///
+/// List models that are available locally.
+/// <br/>
+/// <a href="https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models">Ollama API docs</a>
/// </summary>
public class ListModelsResponse
{
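
A usage sketch, assuming the client's ListLocalModelsAsync method and the Name/Size members on the returned models:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
var models = await ollama.ListLocalModelsAsync();
foreach (var model in models)
	Console.WriteLine($"{model.Name} ({model.Size} bytes)");
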
diff --git a/src/Models/ListRunningModels.cs b/src/Models/ListRunningModels.cs
index bd17cb8..682a376 100644
--- a/src/Models/ListRunningModels.cs
+++ b/src/Models/ListRunningModels.cs
@@ -5,7 +5,9 @@
namespace OllamaSharp.Models;
/// <summary>
-/// A response from the /api/ps endpoint.
+/// List models that are currently loaded into memory.
+/// <br/>
+/// <a href="https://github.com/ollama/ollama/blob/main/docs/api.md#list-running-models">Ollama API docs</a>
/// </summary>
[JsonUnmappedMemberHandling(JsonUnmappedMemberHandling.Skip)]
public class ListRunningModelsResponse
diff --git a/src/Models/PullModel.cs b/src/Models/PullModel.cs
index c3d30b0..69b1953 100644
--- a/src/Models/PullModel.cs
+++ b/src/Models/PullModel.cs
@@ -3,8 +3,10 @@
namespace OllamaSharp.Models;
/// <summary>
-/// Represents a request to pull a model from the API.
-///
+/// Download a model from the ollama library. Cancelled pulls are resumed from
+/// where they left off, and multiple calls will share the same download progress.
+/// <br/>
+/// <a href="https://github.com/ollama/ollama/blob/main/docs/api.md#pull-a-model">Ollama API docs</a>
/// </summary>
public class PullModelRequest : OllamaRequest
{
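
A usage sketch of a streamed pull, assuming PullModelResponse exposes Percent and Status progress fields:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
// Progress is streamed; re-running a cancelled pull resumes the download.
await foreach (var status in ollama.PullModelAsync("llama3.2:1b"))
	Console.WriteLine($"{status?.Percent:0.0}% {status?.Status}");
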
diff --git a/src/Models/PushModel.cs b/src/Models/PushModel.cs
index 67b4c24..61495f0 100644
--- a/src/Models/PushModel.cs
+++ b/src/Models/PushModel.cs
@@ -3,7 +3,10 @@
namespace OllamaSharp.Models;
/// <summary>
-/// Represents a request to push a model.
+/// Upload a model to a model library. Requires registering for ollama.ai and
+/// adding a public key first.
+/// <br/>
+/// <a href="https://github.com/ollama/ollama/blob/main/docs/api.md#push-a-model">Ollama API docs</a>
/// </summary>
public class PushModelRequest : OllamaRequest
{
diff --git a/src/Models/ShowModel.cs b/src/Models/ShowModel.cs
index 72a61d0..d77cdb2 100644
--- a/src/Models/ShowModel.cs
+++ b/src/Models/ShowModel.cs
@@ -4,7 +4,10 @@
namespace OllamaSharp.Models;
/// <summary>
-/// Represents a request to show model information.
+/// Show information about a model including details, modelfile, template,
+/// parameters, license, system prompt.
+/// <br/>
+/// <a href="https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information">Ollama API docs</a>
/// </summary>
[JsonUnmappedMemberHandling(JsonUnmappedMemberHandling.Skip)]
public class ShowModelRequest : OllamaRequest
diff --git a/src/OllamaApiClient.cs b/src/OllamaApiClient.cs
index 9d16e94..8a2c754 100644
--- a/src/OllamaApiClient.cs
+++ b/src/OllamaApiClient.cs
@@ -51,7 +51,7 @@ public class OllamaApiClient : IOllamaApiClient, IChatClient, IEmbeddingGenerato
public string SelectedModel { get; set; }
/// <summary>
- /// Gets the HTTP client that is used to communicate with the Ollama API.
+ /// Gets the <see cref="HttpClient"/> used to communicate with the Ollama API.
/// </summary>
private readonly HttpClient _client;
@@ -214,7 +214,8 @@ public async Task IsRunningAsync(CancellationToken cancellationToken = def
public async Task<Version> GetVersionAsync(CancellationToken cancellationToken = default)
{
var data = await GetAsync<JsonNode>("api/version", cancellationToken).ConfigureAwait(false);
- return Version.Parse(data["version"]?.ToString());
+ var versionString = data["version"]?.ToString() ?? throw new InvalidOperationException("Could not get version from response.");
+ return Version.Parse(versionString);
}
private async IAsyncEnumerable<GenerateResponseStream?> GenerateCompletionAsync(GenerateRequest generateRequest, [EnumeratorCancellation] CancellationToken cancellationToken)
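
A usage sketch of the hardened version call; the host is an assumption:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
// Throws InvalidOperationException if the response carries no version field.
var version = await ollama.GetVersionAsync();
Console.WriteLine($"Ollama server version: {version}");
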
diff --git a/src/OllamaApiClientExtensions.cs b/src/OllamaApiClientExtensions.cs
index 7d1dc13..e5000d1 100644
--- a/src/OllamaApiClientExtensions.cs
+++ b/src/OllamaApiClientExtensions.cs
@@ -17,6 +17,7 @@ public static class OllamaApiClientExtensions
/// <param name="source">The name of the existing model to copy.</param>
/// <param name="destination">The name the copied model should get.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation.</returns>
public static Task CopyModelAsync(this IOllamaApiClient client, string source, string destination, CancellationToken cancellationToken = default)
=> client.CopyModelAsync(new CopyModelRequest { Source = source, Destination = destination }, cancellationToken);
@@ -30,6 +31,7 @@ public static Task CopyModelAsync(this IOllamaApiClient client, string source, s
/// See <see cref="CreateModelRequest.ModelFileContent"/>.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An async enumerable that can be used to iterate over the streamed responses. See <see cref="CreateModelResponse"/>.</returns>
public static IAsyncEnumerable CreateModelAsync(this IOllamaApiClient client, string name, string modelFileContent, CancellationToken cancellationToken = default)
{
var request = new CreateModelRequest
@@ -52,6 +54,7 @@ public static Task CopyModelAsync(this IOllamaApiClient client, string source, s
/// </param>
/// <param name="path">The path to the model file.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An async enumerable that can be used to iterate over the streamed responses. See <see cref="CreateModelResponse"/>.</returns>
public static IAsyncEnumerable CreateModelAsync(this IOllamaApiClient client, string name, string modelFileContent, string path, CancellationToken cancellationToken = default)
{
var request = new CreateModelRequest
@@ -70,6 +73,7 @@ public static Task CopyModelAsync(this IOllamaApiClient client, string source, s
/// <param name="client">The client used to execute the command.</param>
/// <param name="model">The name of the model to delete.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A task that represents the asynchronous operation.</returns>
public static Task DeleteModelAsync(this IOllamaApiClient client, string model, CancellationToken cancellationToken = default)
=> client.DeleteModelAsync(new DeleteModelRequest { Model = model }, cancellationToken);
@@ -79,6 +83,7 @@ public static Task DeleteModelAsync(this IOllamaApiClient client, string model,
/// <param name="client">The client used to execute the command.</param>
/// <param name="model">The name of the model to pull.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An async enumerable that can be used to iterate over the streamed responses. See <see cref="PullModelResponse"/>.</returns>
public static IAsyncEnumerable PullModelAsync(this IOllamaApiClient client, string model, CancellationToken cancellationToken = default)
=> client.PullModelAsync(new PullModelRequest { Model = model }, cancellationToken);
@@ -88,6 +93,7 @@ public static Task DeleteModelAsync(this IOllamaApiClient client, string model,
/// <param name="client">The client used to execute the command.</param>
/// <param name="name">The name of the model to push.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>An async enumerable that can be used to iterate over the streamed responses. See <see cref="PushModelResponse"/>.</returns>
public static IAsyncEnumerable PushModelAsync(this IOllamaApiClient client, string name, CancellationToken cancellationToken = default)
=> client.PushModelAsync(new PushModelRequest { Model = name, Stream = true }, cancellationToken);
@@ -97,6 +103,7 @@ public static Task DeleteModelAsync(this IOllamaApiClient client, string model,
/// <param name="client">The client used to execute the command.</param>
/// <param name="input">The input text to generate embeddings for.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
+ /// <returns>A <see cref="EmbedResponse"/> containing the embeddings.</returns>
public static Task EmbedAsync(this IOllamaApiClient client, string input, CancellationToken cancellationToken = default)
{
var request = new EmbedRequest
@@ -118,7 +125,7 @@ public static Task EmbedAsync(this IOllamaApiClient client, strin
/// Should reuse the result from earlier calls if these calls belong together. Can be null initially.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
- /// <returns>An async enumerable that can be used to iterate over the streamed responses.</returns>
+ /// <returns>An async enumerable that can be used to iterate over the streamed responses. See <see cref="GenerateResponseStream"/>.</returns>
public static IAsyncEnumerable GenerateAsync(this IOllamaApiClient client, string prompt, ConversationContext? context = null, CancellationToken cancellationToken = default)
{
var request = new GenerateRequest
@@ -137,7 +144,7 @@ public static Task EmbedAsync(this IOllamaApiClient client, strin
/// <param name="client">The client used to execute the command.</param>
/// <param name="model">The name of the model to get the information for.</param>
/// <param name="cancellationToken">The token to cancel the operation with.</param>
- /// <returns>The model information.</returns>
+ /// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="ShowModelResponse"/> with the model information.</returns>
public static Task ShowModelAsync(this IOllamaApiClient client, string model, CancellationToken cancellationToken = default)
=> client.ShowModelAsync(new ShowModelRequest { Model = model }, cancellationToken);
}
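
Rounding out the extension surface, a short sketch combining ShowModelAsync and DeleteModelAsync; the model names and the Details.Family member access are assumptions:

using OllamaSharp;

var ollama = new OllamaApiClient(new Uri("http://localhost:11434"));
var info = await ollama.ShowModelAsync("llama3.2");
Console.WriteLine(info.Details?.Family);
// Remove a model that is no longer needed.
await ollama.DeleteModelAsync("llama3.2-backup");
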
diff --git a/src/OllamaSharp.csproj b/src/OllamaSharp.csproj
index 8e2f1dd..262f7c2 100644
--- a/src/OllamaSharp.csproj
+++ b/src/OllamaSharp.csproj
@@ -37,9 +37,14 @@
+ <ItemGroup>
+ <AssemblyAttribute Include="System.Runtime.CompilerServices.InternalsVisibleTo">
+ <_Parameter1>Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100a171f1618f5d4caf94ac5e1323ed80e3e2b686509951a686b66491108cf673ec16a8507ae06e7a4cc81ac14b057659e84401f1d56e17023037c160f4e3e35f6de81c479a778c78a184d15b4ccce943d2202eeeaec0c63028e1061ef5ca236b7a7d7fc52eef66d1fc65ebb89560b8dffb2353dfd2394ef2b1ec41bc3accab7df0</_Parameter1>
+ </AssemblyAttribute>
+ </ItemGroup>
+
-
-
+
\ No newline at end of file
diff --git a/test/Tests.csproj b/test/Tests.csproj
index 74322e2..1a72fdf 100644
--- a/test/Tests.csproj
+++ b/test/Tests.csproj
@@ -1,32 +1,34 @@
- <PropertyGroup>
- <TargetFramework>net8.0</TargetFramework>
- <ImplicitUsings>enable</ImplicitUsings>
- <Nullable>enable</Nullable>
- <IsPackable>false</IsPackable>
- <IsTestProject>true</IsTestProject>
- <NoWarn>IDE0065;IDE0055;IDE0011;CS8602;CS8604;S6608</NoWarn>
- </PropertyGroup>
+ <PropertyGroup>
+ <TargetFramework>net8.0</TargetFramework>
+ <ImplicitUsings>enable</ImplicitUsings>
+ <Nullable>enable</Nullable>
+ <IsPackable>false</IsPackable>
+ <IsTestProject>true</IsTestProject>
+ <NoWarn>IDE0065;IDE0055;IDE0011;CS8602;CS8604;S6608</NoWarn>
+ <SignAssembly>True</SignAssembly>
+ <AssemblyOriginatorKeyFile>..\OllamaSharp.snk</AssemblyOriginatorKeyFile>
+ </PropertyGroup>
-
-
-
-
-
-
-
- <PrivateAssets>all</PrivateAssets>
- <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
-
-
- <PrivateAssets>all</PrivateAssets>
- <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
-
-
+
+
+
+
+
+
+
+ <PrivateAssets>all</PrivateAssets>
+ <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
+
+
+ <PrivateAssets>all</PrivateAssets>
+ <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
+
+
-
-
-
+
+
+
diff --git a/toc.yml b/toc.yml
new file mode 100644
index 0000000..920d2e2
--- /dev/null
+++ b/toc.yml
@@ -0,0 +1,4 @@
+- name: Docs
+ href: docs/
+- name: API
+ href: api/OllamaSharp.html
\ No newline at end of file