From 907702c204c6a200ebbdec0f4b4471136662ccf5 Mon Sep 17 00:00:00 2001
From: Tisjwlf <156173182+Tisjwlf@users.noreply.github.com>
Date: Mon, 20 May 2024 01:22:09 +0200
Subject: [PATCH] Fix gguf multipart file loading (#5857)

---
 modules/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 687af8ba..cac66393 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -266,7 +266,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf'))[0]
+        model_file = sorted(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf'))[0]
 
     logger.info(f"llama.cpp weights detected: \"{model_file}\"")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
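
Note (not applied by the patch): a minimal sketch of why the one-line change fixes multipart loading. Path.glob() yields files in arbitrary filesystem order, so list(...)[0] could pick any shard of a split GGUF model, while sorted(...)[0] deterministically selects the first one. The shard names below are hypothetical examples of the usual zero-padded "-0000N-of-0000M.gguf" convention.

    # Illustrative sketch; file names are hypothetical.
    from pathlib import Path

    parts = [
        Path("model-00002-of-00003.gguf"),
        Path("model-00003-of-00003.gguf"),
        Path("model-00001-of-00003.gguf"),
    ]

    # Indexing [0] into an unsorted glob result can return a later shard.
    # Sorting lexicographically puts the first shard up front because the
    # part index is zero-padded, and llama.cpp generally expects to be
    # handed that first shard when loading a split model.
    first_shard = sorted(parts)[0]
    print(first_shard)  # model-00001-of-00003.gguf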