diff --git a/examples/simple/src/main.rs b/examples/simple/src/main.rs
index 2cea197d..9d4eef47 100644
--- a/examples/simple/src/main.rs
+++ b/examples/simple/src/main.rs
@@ -10,7 +10,6 @@ use anyhow::{anyhow, bail, Context, Result};
 use clap::Parser;
 use hf_hub::api::sync::ApiBuilder;
 use llama_cpp_2::context::params::LlamaContextParams;
-use llama_cpp_2::{ggml_time_us, send_logs_to_tracing, LogOptions};
 use llama_cpp_2::llama_backend::LlamaBackend;
 use llama_cpp_2::llama_batch::LlamaBatch;
 use llama_cpp_2::model::params::kv_overrides::ParamOverrideValue;
@@ -18,6 +17,7 @@ use llama_cpp_2::model::params::LlamaModelParams;
 use llama_cpp_2::model::LlamaModel;
 use llama_cpp_2::model::{AddBos, Special};
 use llama_cpp_2::sampling::LlamaSampler;
+use llama_cpp_2::{ggml_time_us, send_logs_to_tracing, LogOptions};
 use std::ffi::CString;
 use std::io::Write;
 use std::num::NonZeroU32;
@@ -67,11 +67,7 @@ struct Args {
         help = "size of the prompt context (default: loaded from themodel)"
     )]
     ctx_size: Option<NonZeroU32>,
-    #[arg(
-        short = 'v',
-        long,
-        help = "enable verbose llama.cpp logs",
-    )]
+    #[arg(short = 'v', long, help = "enable verbose llama.cpp logs")]
     verbose: bool,
 }
 
diff --git a/llama-cpp-2/src/log.rs b/llama-cpp-2/src/log.rs
index db6ff653..1c324b4b 100644
--- a/llama-cpp-2/src/log.rs
+++ b/llama-cpp-2/src/log.rs
@@ -171,7 +171,8 @@ impl State {
         } else {
             let level = self
                 .previous_level
-                .load(std::sync::atomic::Ordering::Acquire) as llama_cpp_sys_2::ggml_log_level;
+                .load(std::sync::atomic::Ordering::Acquire)
+                as llama_cpp_sys_2::ggml_log_level;
             tracing::warn!(
                 inferred_level = level,
                 text = text,