From dd264e79bc57312dd9eb2c2c3580e244b1427a2a Mon Sep 17 00:00:00 2001
From: Daniel Bevenius
Date: Thu, 29 Feb 2024 11:31:54 +0100
Subject: [PATCH] add with_main_gpu to LlamaModelParams

This commit adds a `with_main_gpu` method to `LlamaModelParams` which
allows the main GPU to be set.

Signed-off-by: Daniel Bevenius
---
 llama-cpp-2/src/model/params.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/llama-cpp-2/src/model/params.rs b/llama-cpp-2/src/model/params.rs
index dfd48583..5631ae9c 100644
--- a/llama-cpp-2/src/model/params.rs
+++ b/llama-cpp-2/src/model/params.rs
@@ -56,6 +56,13 @@ impl LlamaModelParams {
         self
     }
 
+    /// sets the main GPU
+    #[must_use]
+    pub fn with_main_gpu(mut self, main_gpu: i32) -> Self {
+        self.params.main_gpu = main_gpu;
+        self
+    }
+
     /// sets `vocab_only`
     #[must_use]
     pub fn with_vocab_only(mut self, vocab_only: bool) -> Self {
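
Usage note (not part of the patch): a minimal sketch of how the new builder
method might be called. `with_main_gpu` and `with_vocab_only` are the methods
shown in the diff above; the `Default` implementation for `LlamaModelParams`
is assumed here to supply the crate's default parameter values.

    use llama_cpp_2::model::params::LlamaModelParams;

    fn main() {
        // Build model parameters, selecting GPU 0 as the main device and
        // keeping full model loading (vocab_only = false).
        // LlamaModelParams::default() is assumed to exist in the crate.
        let params = LlamaModelParams::default()
            .with_main_gpu(0)
            .with_vocab_only(false);

        // `params` would then be passed along when loading a model
        // (hypothetical next step, not shown in this patch).
        let _ = params;
    }

Because each `with_*` method takes `self` by value and returns `Self`, the
calls chain naturally, matching the existing builder style of
`LlamaModelParams`.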