mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2024-09-20 18:45:09 +02:00
Bump llama-cpp-python to use LlamaCache
This commit is contained in:
parent
ac189011cb
commit
d2ea925fa5
2 changed files with 4 additions and 3 deletions
|
@@ -6,7 +6,7 @@ Documentation:
|
||||||
https://abetlen.github.io/llama-cpp-python/
|
https://abetlen.github.io/llama-cpp-python/
|
||||||
'''
|
'''
|
||||||
|
|
||||||
from llama_cpp import Llama
|
from llama_cpp import Llama, LlamaCache
|
||||||
|
|
||||||
from modules import shared
|
from modules import shared
|
||||||
from modules.callbacks import Iteratorize
|
from modules.callbacks import Iteratorize
|
||||||
|
@@ -27,6 +27,7 @@ class LlamaCppModel:
|
||||||
'n_threads': shared.args.threads or None
|
'n_threads': shared.args.threads or None
|
||||||
}
|
}
|
||||||
self.model = Llama(**params)
|
self.model = Llama(**params)
|
||||||
|
self.model.set_cache(LlamaCache)
|
||||||
|
|
||||||
# This is ugly, but the model and the tokenizer are the same object in this library.
|
# This is ugly, but the model and the tokenizer are the same object in this library.
|
||||||
return result, result
|
return result, result
|
||||||
|
|
|
@@ -14,5 +14,5 @@ tqdm
|
||||||
git+https://github.com/huggingface/peft
|
git+https://github.com/huggingface/peft
|
||||||
transformers==4.28.0
|
transformers==4.28.0
|
||||||
bitsandbytes==0.38.1; platform_system != "Windows"
|
bitsandbytes==0.38.1; platform_system != "Windows"
|
||||||
llama-cpp-python==0.1.33; platform_system != "Windows"
|
llama-cpp-python==0.1.34; platform_system != "Windows"
|
||||||
https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.33/llama_cpp_python-0.1.33-cp310-cp310-win_amd64.whl; platform_system == "Windows"
|
https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.34/llama_cpp_python-0.1.34-cp310-cp310-win_amd64.whl; platform_system == "Windows"
|
||||||
|
|
Loading…
Reference in a new issue