
Commit 6b65541
Fixed module import error
zer0py2c committed Sep 22, 2024
1 parent 1141f19 commit 6b65541
Showing 1 changed file with 2 additions and 2 deletions.
fastchat/model/model_adapter.py: 4 changes (2 additions & 2 deletions)
@@ -44,7 +44,7 @@
 from fastchat.modules.exllama import ExllamaConfig, load_exllama_model
 from fastchat.modules.xfastertransformer import load_xft_model, XftConfig
 from fastchat.modules.gptq import GptqConfig, load_gptq_quantized
-from fastchat.utils import get_gpu_memory
+from fastchat.utils import get_gpu_memory, get_npu_memory
 
 # Check an environment variable to check if we should be sharing Peft model
 # weights. When false we treat all Peft models as separate.
@@ -861,7 +861,7 @@ def match(self, model_path: str):
         return "chatglm" in model_path.lower()
 
     def load_model(self, model_path: str, from_pretrained_kwargs: dict):
-        # Disable JIT on-the-fly compilation
+        # Disable JIT just-in-time compilation
         torch.npu.set_compile_mode(jit_compile=False)
         revision = from_pretrained_kwargs.get("revision", "main")
         if "chatglm3" in model_path.lower():
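
Note on the new import: the commit only adds get_npu_memory to the import list; its definition in fastchat/utils.py is not part of this diff. A minimal sketch of what such a helper could look like, mirroring FastChat's existing get_gpu_memory, is shown below. It assumes an Ascend environment where the torch_npu plugin registers a CUDA-like torch.npu namespace (device_count, device, get_device_properties, memory_allocated); only the function name is taken from the commit itself.

import torch
import torch_npu  # assumed: registers the torch.npu namespace on Ascend hardware


def get_npu_memory(max_npus=None):
    """Return the available memory of each NPU in GB (illustrative sketch)."""
    npu_memory = []
    num_npus = (
        torch.npu.device_count()
        if max_npus is None
        else min(max_npus, torch.npu.device_count())
    )
    for npu_id in range(num_npus):
        with torch.npu.device(npu_id):
            # get_device_properties and memory_allocated report bytes; convert to GB.
            total = torch.npu.get_device_properties(npu_id).total_memory / (1024**3)
            allocated = torch.npu.memory_allocated() / (1024**3)
            npu_memory.append(total - allocated)
    return npu_memory

Keeping the NPU helper symmetric with get_gpu_memory would let callers in model_adapter.py pick one or the other based on the device type without changing the surrounding logic.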
