Don't fail on AMD

Check whether PyTorch was compiled with HIP (ROCm); if so, don't try to load the CUDA build of llama-cpp-python and instead use the llama.cpp binding that was compiled with HIP.
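
A minimal sketch of the detection the message describes, assuming a standard PyTorch install (the helper name is hypothetical, not part of the commit): torch.version.hip is a version string on ROCm wheels and None on CUDA or CPU-only wheels.

import torch

def is_rocm_build() -> bool:
    # Hypothetical helper: torch.version.hip is e.g. "6.0.32830" on ROCm
    # wheels and None on CUDA or CPU-only wheels.
    return torch.version.hip is not None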
Waheed Barghouthi, 2024-08-07 08:18:11 +02:00 (committed by GitHub)
parent d011040f43
commit aa2de06680

llama_cpp_python_hijack.py

@@ -1,9 +1,10 @@
 import importlib
 import platform
+import torch
 from typing import Sequence

 from tqdm import tqdm

 from modules import shared
 from modules.cache_utils import process_llamacpp_cache

@@ -22,6 +22,11 @@ def llama_cpp_lib():
         lib_names = [
             (None, 'llama_cpp')
         ]
+    elif torch.version.hip:
+        lib_names = [
+            ('cpu', 'llama_cpp'),
+            (None, 'llama_cpp')
+        ]
     else:
         lib_names = [
             ('cpu', 'llama_cpp'),