diff --git a/README.md b/README.md
index 72033675..baaf0273 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,6 @@ This repository contains Swift community-maintained implementation over [OpenAI]
         - [Audio Create Speech](#audio-create-speech)
         - [Audio Transcriptions](#audio-transcriptions)
         - [Audio Translations](#audio-translations)
-    - [Edits](#edits)
     - [Embeddings](#embeddings)
     - [Models](#models)
         - [List Models](#list-models)
@@ -649,71 +648,6 @@ let result = try await openAI.audioTranslations(query: query)
 
 Review [Audio Documentation](https://platform.openai.com/docs/api-reference/audio) for more info.
 
-### Edits
-
-Creates a new edit for the provided input, instruction, and parameters.
-
-**Request**
-
-```swift
-struct EditsQuery: Codable {
-    /// ID of the model to use.
-    public let model: Model
-    /// Input text to get embeddings for.
-    public let input: String?
-    /// The instruction that tells the model how to edit the prompt.
-    public let instruction: String
-    /// The number of images to generate. Must be between 1 and 10.
-    public let n: Int?
-    /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
-    public let temperature: Double?
-    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-    public let topP: Double?
-}
-```
-
-**Response**
-
-```swift
-struct EditsResult: Codable, Equatable {
-
-    public struct Choice: Codable, Equatable {
-        public let text: String
-        public let index: Int
-    }
-
-    public struct Usage: Codable, Equatable {
-        public let promptTokens: Int
-        public let completionTokens: Int
-        public let totalTokens: Int
-
-        enum CodingKeys: String, CodingKey {
-            case promptTokens = "prompt_tokens"
-            case completionTokens = "completion_tokens"
-            case totalTokens = "total_tokens"
-        }
-    }
-
-    public let object: String
-    public let created: TimeInterval
-    public let choices: [Choice]
-    public let usage: Usage
-}
-```
-
-**Example**
-
-```swift
-let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
-openAI.edits(query: query) { result in
-  //Handle response here
-}
-//or
-let result = try await openAI.edits(query: query)
-```
-
-Review [Edits Documentation](https://platform.openai.com/docs/api-reference/edits) for more info.
-
 ### Embeddings
 
 Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
@@ -1003,7 +937,6 @@ func completions(query: CompletionsQuery) -> AnyPublisher<CompletionsResult, Error>
 func images(query: ImagesQuery) -> AnyPublisher<ImagesResult, Error>
 func embeddings(query: EmbeddingsQuery) -> AnyPublisher<EmbeddingsResult, Error>
 func chats(query: ChatQuery) -> AnyPublisher<ChatResult, Error>
-func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error>
 func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error>
 func models() -> AnyPublisher<ModelsResult, Error>
 func moderations(query: ModerationsQuery) -> AnyPublisher<ModerationsResult, Error>
diff --git a/Sources/OpenAI/OpenAI.swift b/Sources/OpenAI/OpenAI.swift
index 49715c8e..2039fc99 100644
--- a/Sources/OpenAI/OpenAI.swift
+++ b/Sources/OpenAI/OpenAI.swift
@@ -88,10 +88,6 @@ final public class OpenAI: OpenAIProtocol {
         performStreamingRequest(request: JSONRequest(body: query.makeStreamable(), url: buildURL(path: .chats)), onResult: onResult, completion: completion)
     }
 
-    public func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void) {
-        performRequest(request: JSONRequest(body: query, url: buildURL(path: .edits)), completion: completion)
-    }
-
     public func model(query: ModelQuery, completion: @escaping (Result<ModelResult, Error>) -> Void) {
         performRequest(request: JSONRequest(url: buildURL(path: .models.withPath(query.model)), method: "GET"), completion: completion)
     }
@@ -208,7 +204,6 @@ extension APIPath {
     static let completions = "/v1/completions"
     static let embeddings = "/v1/embeddings"
     static let chats = "/v1/chat/completions"
-    static let edits = "/v1/edits"
     static let models = "/v1/models"
     static let moderations = "/v1/moderations"
 
diff --git a/Sources/OpenAI/Public/Models/EditsQuery.swift b/Sources/OpenAI/Public/Models/EditsQuery.swift
deleted file mode 100644
index 2aeeb651..00000000
--- a/Sources/OpenAI/Public/Models/EditsQuery.swift
+++ /dev/null
@@ -1,32 +0,0 @@
-//
-//  EditsQuery.swift
-//
-//
-//  Created by Aled Samuel on 14/04/2023.
-//
-
-import Foundation
-
-public struct EditsQuery: Codable {
-    /// ID of the model to use.
-    public let model: Model
-    /// Input text to get embeddings for.
-    public let input: String?
-    /// The instruction that tells the model how to edit the prompt.
-    public let instruction: String
-    /// The number of images to generate. Must be between 1 and 10.
-    public let n: Int?
-    /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
-    public let temperature: Double?
-    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-    public let topP: Double?
-
-    public init(model: Model, input: String?, instruction: String, n: Int? = nil, temperature: Double? = nil, topP: Double? = nil) {
-        self.model = model
-        self.input = input
-        self.instruction = instruction
-        self.n = n
-        self.temperature = temperature
-        self.topP = topP
-    }
-}
diff --git a/Sources/OpenAI/Public/Models/EditsResult.swift b/Sources/OpenAI/Public/Models/EditsResult.swift
deleted file mode 100644
index b318aca0..00000000
--- a/Sources/OpenAI/Public/Models/EditsResult.swift
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-//  EditsResult.swift
-//
-//
-//  Created by Aled Samuel on 14/04/2023.
-//
-
-import Foundation
-
-public struct EditsResult: Codable, Equatable {
-
-    public struct Choice: Codable, Equatable {
-        public let text: String
-        public let index: Int
-    }
-
-    public struct Usage: Codable, Equatable {
-        public let promptTokens: Int
-        public let completionTokens: Int
-        public let totalTokens: Int
-
-        enum CodingKeys: String, CodingKey {
-            case promptTokens = "prompt_tokens"
-            case completionTokens = "completion_tokens"
-            case totalTokens = "total_tokens"
-        }
-    }
-
-    public let object: String
-    public let created: TimeInterval
-    public let choices: [Choice]
-    public let usage: Usage
-}
diff --git a/Sources/OpenAI/Public/Models/Models/Models.swift b/Sources/OpenAI/Public/Models/Models/Models.swift
index 514dc99b..7ca30397 100644
--- a/Sources/OpenAI/Public/Models/Models/Models.swift
+++ b/Sources/OpenAI/Public/Models/Models/Models.swift
@@ -84,11 +84,6 @@ public extension Model {
     /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
     static let textAda = "text-ada-001"
 
-    // Edits
-
-    static let textDavinci_001 = "text-davinci-001"
-    static let codeDavinciEdit_001 = "code-davinci-edit-001"
-
     // Speech
 
     /// The latest text to speech model, optimized for speed.
diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift
index b515a234..6e221f5d 100644
--- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift
+++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift
@@ -126,21 +126,6 @@ public extension OpenAIProtocol {
         }
     }
 
-    func edits(
-        query: EditsQuery
-    ) async throws -> EditsResult {
-        try await withCheckedThrowingContinuation { continuation in
-            edits(query: query) { result in
-                switch result {
-                case let .success(success):
-                    return continuation.resume(returning: success)
-                case let .failure(failure):
-                    return continuation.resume(throwing: failure)
-                }
-            }
-        }
-    }
-
     func model(
         query: ModelQuery
     ) async throws -> ModelResult {
diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift
index c5142044..5cd00247 100644
--- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift
+++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift
@@ -85,13 +85,6 @@ public extension OpenAIProtocol {
         return progress.eraseToAnyPublisher()
     }
 
-    func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error> {
-        Future {
-            edits(query: query, completion: $0)
-        }
-        .eraseToAnyPublisher()
-    }
-
     func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error> {
         Future {
             model(query: query, completion: $0)
diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
index 8c65b190..91f778d9 100644
--- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
+++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
@@ -147,23 +147,6 @@ public protocol OpenAIProtocol {
     **/
     func chatsStream(query: ChatQuery, onResult: @escaping (Result<ChatStreamResult, Error>) -> Void, completion: ((Error?) -> Void)?)
 
-    /**
-    This function sends an edits query to the OpenAI API and retrieves an edited version of the prompt based on the instruction given.
-
-    Example:
-    ```
-    let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
-    openAI.edits(query: query) { result in
-      //Handle response here
-    }
-    ```
-
-    - Parameters:
-      - query: An `EditsQuery` object containing the input parameters for the API request. This includes the input to be edited, the instruction specifying how it should be edited, and other settings.
-      - completion: A closure which receives the result when the API request finishes. The closure's parameter, `Result<EditsResult, Error>`, will contain either the `EditsResult` object with the model's response to the queried edit, or an error if the request failed.
-    **/
-    func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void)
-
     /**
     This function sends a model query to the OpenAI API and retrieves a model instance, providing owner information. The Models API in this usage enables you to gather detailed information on the model in question, like GPT-3.
 
diff --git a/Tests/OpenAITests/OpenAITests.swift b/Tests/OpenAITests/OpenAITests.swift
index 7285dd16..11d8b6d5 100644
--- a/Tests/OpenAITests/OpenAITests.swift
+++ b/Tests/OpenAITests/OpenAITests.swift
@@ -150,26 +150,6 @@ class OpenAITests: XCTestCase {
         XCTAssertEqual(inError, apiError)
     }
 
-    func testEdits() async throws {
-        let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
-        let editsResult = EditsResult(object: "edit", created: 1589478378, choices: [
-            .init(text: "What day of the week is it?", index: 0)
-        ], usage: .init(promptTokens: 25, completionTokens: 32, totalTokens: 57))
-        try self.stub(result: editsResult)
-
-        let result = try await openAI.edits(query: query)
-        XCTAssertEqual(result, editsResult)
-    }
-
-    func testEditsError() async throws {
-        let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
-        let inError = APIError(message: "foo", type: "bar", param: "baz", code: "100")
-        self.stub(error: inError)
-
-        let apiError: APIError = try await XCTExpectError { try await openAI.edits(query: query) }
-        XCTAssertEqual(inError, apiError)
-    }
-
     func testEmbeddings() async throws {
         let query = EmbeddingsQuery(
             input: .string("The food was delicious and the waiter..."),
diff --git a/Tests/OpenAITests/OpenAITestsCombine.swift b/Tests/OpenAITests/OpenAITestsCombine.swift
index b7918b44..126a26b6 100644
--- a/Tests/OpenAITests/OpenAITestsCombine.swift
+++ b/Tests/OpenAITests/OpenAITestsCombine.swift
@@ -51,16 +51,6 @@ final class OpenAITestsCombine: XCTestCase {
         XCTAssertEqual(result, chatResult)
     }
 
-    func testEdits() throws {
-        let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
-        let editsResult = EditsResult(object: "edit", created: 1589478378, choices: [
-            .init(text: "What day of the week is it?", index: 0)
-        ], usage: .init(promptTokens: 25, completionTokens: 32, totalTokens: 57))
-        try self.stub(result: editsResult)
-        let result = try awaitPublisher(openAI.edits(query: query))
-        XCTAssertEqual(result, editsResult)
-    }
-
     func testEmbeddings() throws {
         let query = EmbeddingsQuery(input: .string("The food was delicious and the waiter..."), model: .textEmbeddingAda)
         let embeddingsResult = EmbeddingsResult(data: [
diff --git a/Tests/OpenAITests/OpenAITestsDecoder.swift b/Tests/OpenAITests/OpenAITestsDecoder.swift
index d9672c04..14ef38e5 100644
--- a/Tests/OpenAITests/OpenAITestsDecoder.swift
+++ b/Tests/OpenAITests/OpenAITestsDecoder.swift
@@ -252,31 +252,6 @@ class OpenAITestsDecoder: XCTestCase {
             systemFingerprint: nil)
         try decode(data, expectedValue)
     }
-
-    func testEdits() async throws {
-        let data = """
-        {
-          "object": "edit",
-          "created": 1589478378,
-          "choices": [
-            {
-              "text": "What day of the week is it?",
-              "index": 0,
-            }
-          ],
-          "usage": {
"prompt_tokens": 25, - "completion_tokens": 32, - "total_tokens": 57 - } - } - """ - - let expectedValue = EditsResult(object: "edit", created: 1589478378, choices: [ - .init(text: "What day of the week is it?", index: 0) - ], usage: .init(promptTokens: 25, completionTokens: 32, totalTokens: 57)) - try decode(data, expectedValue) - } func testEmbeddings() async throws { let data = """