From aa2de0668011225d7a69c20b19aa40857dbd553c Mon Sep 17 00:00:00 2001
From: Waheed Barghouthi
Date: Wed, 7 Aug 2024 08:18:11 +0200
Subject: [PATCH] Don't fail on AMD

llama_cpp_python_hijack.py: check whether PyTorch was compiled with HIP
(ROCm) and, if so, don't try to load the CUDA variants; use the
HIP-compiled llama.cpp instead.

---
 modules/llama_cpp_python_hijack.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/modules/llama_cpp_python_hijack.py b/modules/llama_cpp_python_hijack.py
index 64280dc9..d241e894 100644
--- a/modules/llama_cpp_python_hijack.py
+++ b/modules/llama_cpp_python_hijack.py
@@ -1,9 +1,9 @@
 import importlib
 import platform
+import torch
+
 from typing import Sequence
-
 from tqdm import tqdm
-
 from modules import shared
 from modules.cache_utils import process_llamacpp_cache
 
@@ -22,6 +22,11 @@ def llama_cpp_lib():
         lib_names = [
             (None, 'llama_cpp')
         ]
+    elif torch.version.hip:
+        lib_names = [
+            ('cpu', 'llama_cpp'),
+            (None, 'llama_cpp')
+        ]
     else:
         lib_names = [
             ('cpu', 'llama_cpp'),
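
Note (aside for reviewers, not part of the patch): the detection relies on
torch.version.hip, which on ROCm builds of PyTorch is a version string
(e.g. "5.7.0") and on CUDA or CPU-only builds is None, so it works
directly in a truth test. A minimal sketch to verify this on a given
machine, assuming only that torch is installed:

    import torch

    # On ROCm builds torch.version.hip is a string such as "5.7.0";
    # on CUDA or CPU-only builds it is None.
    if torch.version.hip:
        print("ROCm/HIP build of PyTorch:", torch.version.hip)
    else:
        print("Not a HIP build; torch.version.cuda =", torch.version.cuda)

Because torch.version.hip is None everywhere except ROCm builds, the new
elif branch never triggers on NVIDIA or CPU-only installs, which keeps
the existing selection order unchanged there.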