
Commit

added default inits for the new AudioTranscriptionResult and added a test case
Frank-Buss committed Nov 8, 2024
1 parent 90acb23 commit be88f6a
Showing 2 changed files with 47 additions and 2 deletions.
16 changes: 15 additions & 1 deletion Sources/OpenAI/Public/Models/AudioTranscriptionResult.swift
@@ -18,7 +18,21 @@ public struct AudioTranscriptionResult: Codable, Equatable {
    public let text: String
    /// The segments containing detailed information (only present in verbose_json format)
    public let segments: [Segment]?

    public init(
        task: String? = nil,
        language: String? = nil,
        duration: Double? = nil,
        text: String,
        segments: [Segment]? = nil
    ) {
        self.task = task
        self.language = language
        self.duration = duration
        self.text = text
        self.segments = segments
    }

    public struct Segment: Codable, Equatable {
        public let id: Int
        public let seek: Int
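For orientation, a minimal usage sketch (not part of this commit's diff): because every parameter except `text` now has a nil default, a result can be built from the transcription text alone, as a plain json response would require. The `import OpenAI` module name is assumed from the Sources/OpenAI path.

import OpenAI

// Hypothetical call site: task, language, duration and segments all default to nil,
// so only the required transcription text needs to be supplied.
let simple = AudioTranscriptionResult(text: "This is a test.")

// The verbose_json test case added below fills in every field explicitly instead.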
33 changes: 32 additions & 1 deletion Tests/OpenAITests/OpenAITests.swift
@@ -320,7 +320,38 @@ class OpenAITests: XCTestCase {
        let result = try await openAI.audioTranscriptions(query: query)
        XCTAssertEqual(result, transcriptionResult)
    }

    func testVerboseJsonAudioTranscriptions() async throws {
        let data = Data()
        let query = AudioTranscriptionQuery(file: data, fileType: .m4a, model: .whisper_1, responseFormat: .verboseJson)

        let transcriptionResult = AudioTranscriptionResult(
            task: "transcribe",
            language: "english",
            duration: 3.759999990463257,
            text: "This is a test.",
            segments: [
                AudioTranscriptionResult.Segment(
                    id: 0,
                    seek: 0,
                    start: 0,
                    end: 3.759999990463257,
                    text: " This is a test.",
                    tokens: [50364, 639, 307, 257, 1500, 13, 50552],
                    temperature: 0,
                    avg_logprob: -0.5153926610946655,
                    compression_ratio: 0.7142857313156128,
                    no_speech_prob: 0.08552933484315872
                )
            ]
        )

        try self.stub(result: transcriptionResult)

        let result = try await openAI.audioTranscriptions(query: query)
        XCTAssertEqual(result, transcriptionResult)
    }

    func testAudioTranscriptionsError() async throws {
        let data = Data()
        let query = AudioTranscriptionQuery(file: data, fileType: .m4a, model: .whisper_1)
