Add to protocol + Error test
Ihor Makhnyk committed Nov 14, 2023
1 parent f34bacd commit 5015ff6
Showing 3 changed files with 41 additions and 0 deletions.
15 changes: 15 additions & 0 deletions Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift
@@ -184,6 +184,21 @@ public extension OpenAIProtocol {
}
}

func audioCreateSpeech(
    query: AudioSpeechQuery
) async throws -> AudioSpeechResult {
    try await withCheckedThrowingContinuation { continuation in
        audioCreateSpeech(query: query) { result in
            switch result {
            case let .success(success):
                return continuation.resume(returning: success)
            case let .failure(failure):
                return continuation.resume(throwing: failure)
            }
        }
    }
}
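For orientation, a minimal usage sketch of the new async overload (not part of this diff): it assumes an existing `openAI` client conforming to `OpenAIProtocol`, and does not assume anything about the contents of `AudioSpeechResult` beyond what the diff shows.

```
// Minimal sketch (assumption: `openAI` is an existing client instance).
let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, response_format: .mp3, speed: 1.0)
// The async overload surfaces the same Result values as the completion-based API,
// returning the success value or rethrowing the failure.
let speech = try await openAI.audioCreateSpeech(query: query)
// `speech` is an AudioSpeechResult carrying the generated audio.
```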

func audioTranscriptions(
    query: AudioTranscriptionQuery
) async throws -> AudioTranscriptionResult {
17 changes: 17 additions & 0 deletions Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
@@ -213,6 +213,23 @@ public protocol OpenAIProtocol {
**/
func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)

/**
This function sends an `AudioSpeechQuery` to the OpenAI API to create audio speech from text using a specific voice and format.

Example:
```
let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, response_format: .mp3, speed: 1.0)
openAI.audioCreateSpeech(query: query) { result in
    // Handle response here
}
```

- Parameters:
  - query: An `AudioSpeechQuery` object containing the parameters for the API request. This includes the text-to-speech model, the input text, the voice used to generate the audio, the desired audio format, and the speed of the generated audio.
  - completion: A closure that receives the result. The closure's parameter, `Result<AudioSpeechResult, Error>`, will contain either the `AudioSpeechResult` object with the audio data or an error if the request failed.
*/
func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void)

/**
Transcribes audio data using OpenAI's audio transcription API and completes the operation asynchronously.

9 changes: 9 additions & 0 deletions Tests/OpenAITests/OpenAITests.swift
@@ -258,6 +258,15 @@ class OpenAITests: XCTestCase {
XCTAssertEqual(inError, apiError)
}

func testAudioSpeechError() async throws {
    let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, response_format: .mp3, speed: 1.0)
    let inError = APIError(message: "foo", type: "bar", param: "baz", code: "100")
    self.stub(error: inError)

    let apiError: APIError = try await XCTExpectError { try await openAI.audioCreateSpeech(query: query) }
    XCTAssertEqual(inError, apiError)
}

func testAudioTranscriptions() async throws {
let data = Data()
let query = AudioTranscriptionQuery(file: data, fileName: "audio.m4a", model: .whisper_1)
