From 3f26ab457cd021467ca821fc5808c7cb90d4cb4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Letzelter?= <34820539+Seb-Ltz@users.noreply.github.com> Date: Sat, 15 Feb 2025 22:45:37 +0100 Subject: [PATCH] Remove double negations for checkCancellation function (#98) # Remove double negations for `checkCancellation` function ## :recycle: Current situation & Problem As mentioned in issue #83 as well as in the PR [#81 (discussion)](https://github.com/StanfordSpezi/SpeziLLM/pull/81#discussion_r1885831907), the SpeziLLM package is currently using a double negation (`guard` followed by `!`) for every `checkCancellation()` function call. As double negations make the code harder to read, we could use `if` instead. ## :gear: Release Notes * Remove the double negations for `checkCancellation` by replacing the `guard` statements that are followed by a not operator with `if` statements. ## :pencil: Code of Conduct & Contributing Guidelines By creating this pull request, you agree to follow our [Code of Conduct](https://github.com/StanfordSpezi/.github/blob/main/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/StanfordSpezi/.github/blob/main/CONTRIBUTING.md): - [x] I agree to follow the [Code of Conduct](https://github.com/StanfordSpezi/.github/blob/main/CODE_OF_CONDUCT.md) and [Contributing Guidelines](https://github.com/StanfordSpezi/.github/blob/main/CONTRIBUTING.md). 
--- Sources/SpeziLLM/Mock/LLMMockSession.swift | 4 ++-- Sources/SpeziLLMFog/LLMFogSession+Generation.swift | 2 +- Sources/SpeziLLMLocal/LLMLocalSession+Generate.swift | 4 ++-- Sources/SpeziLLMLocal/LLMLocalSession.swift | 2 +- Sources/SpeziLLMOpenAI/LLMOpenAISession+Generation.swift | 4 ++-- Sources/SpeziLLMOpenAI/LLMOpenAISession.swift | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Sources/SpeziLLM/Mock/LLMMockSession.swift b/Sources/SpeziLLM/Mock/LLMMockSession.swift index 2169110f..21302166 100644 --- a/Sources/SpeziLLM/Mock/LLMMockSession.swift +++ b/Sources/SpeziLLM/Mock/LLMMockSession.swift @@ -48,7 +48,7 @@ public final class LLMMockSession: LLMSession, @unchecked Sendable { self.state = .loading } try? await Task.sleep(for: .seconds(1)) - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return } @@ -60,7 +60,7 @@ public final class LLMMockSession: LLMSession, @unchecked Sendable { let tokens = ["Mock ", "Message ", "from ", "SpeziLLM!"] for token in tokens { try? 
await Task.sleep(for: .milliseconds(500)) - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return } await injectAndYield(token, on: continuation) diff --git a/Sources/SpeziLLMFog/LLMFogSession+Generation.swift b/Sources/SpeziLLMFog/LLMFogSession+Generation.swift index c37a4a58..209fe9af 100644 --- a/Sources/SpeziLLMFog/LLMFogSession+Generation.swift +++ b/Sources/SpeziLLMFog/LLMFogSession+Generation.swift @@ -37,7 +37,7 @@ extension LLMFogSession { do { for try await streamResult in chatStream { - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { Self.logger.debug("SpeziLLMFog: LLM inference cancelled because of Task cancellation.") return } diff --git a/Sources/SpeziLLMLocal/LLMLocalSession+Generate.swift b/Sources/SpeziLLMLocal/LLMLocalSession+Generate.swift index d6358ce7..ebb80ae9 100644 --- a/Sources/SpeziLLMLocal/LLMLocalSession+Generate.swift +++ b/Sources/SpeziLLMLocal/LLMLocalSession+Generate.swift @@ -51,7 +51,7 @@ extension LLMLocalSession { MLXRandom.seed(self.schema.parameters.seed ?? UInt64(Date.timeIntervalSinceReferenceDate * 1000)) - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return } @@ -163,7 +163,7 @@ extension LLMLocalSession { for token in tokens { try? 
await Task.sleep(for: .seconds(1)) - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return } continuation.yield(token) diff --git a/Sources/SpeziLLMLocal/LLMLocalSession.swift b/Sources/SpeziLLMLocal/LLMLocalSession.swift index a314dcc3..f66e9931 100644 --- a/Sources/SpeziLLMLocal/LLMLocalSession.swift +++ b/Sources/SpeziLLMLocal/LLMLocalSession.swift @@ -132,7 +132,7 @@ public final class LLMLocalSession: LLMSession, @unchecked Sendable { } } - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return } diff --git a/Sources/SpeziLLMOpenAI/LLMOpenAISession+Generation.swift b/Sources/SpeziLLMOpenAI/LLMOpenAISession+Generation.swift index 93e75ade..f92288a0 100644 --- a/Sources/SpeziLLMOpenAI/LLMOpenAISession+Generation.swift +++ b/Sources/SpeziLLMOpenAI/LLMOpenAISession+Generation.swift @@ -50,7 +50,7 @@ extension LLMOpenAISession { continue } - guard await !checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { Self.logger.debug("SpeziLLMOpenAI: LLM inference cancelled because of Task cancellation.") return } @@ -148,7 +148,7 @@ extension LLMOpenAISession { // Errors thrown by the functions are surfaced to the user as an LLM generation error functionCallResponse = try await function.execute() } catch is CancellationError { - guard await !self.checkCancellation(on: continuation) else { + if await self.checkCancellation(on: continuation) { Self.logger.debug("SpeziLLMOpenAI: Function call execution cancelled because of Task cancellation.") throw CancellationError() } diff --git a/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift b/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift index 52347518..d4c31444 100644 --- a/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift +++ b/Sources/SpeziLLMOpenAI/LLMOpenAISession.swift @@ -140,7 +140,7 @@ public final class LLMOpenAISession: LLMSession, @unchecked Sendable { } } - guard await 
!checkCancellation(on: continuation) else { + if await checkCancellation(on: continuation) { return }