diff --git a/Colab-TextGen-GPU.ipynb b/Colab-TextGen-GPU.ipynb
index f8b6719f..f73947bf 100644
--- a/Colab-TextGen-GPU.ipynb
+++ b/Colab-TextGen-GPU.ipynb
@@ -87,8 +87,8 @@
         " !pip uninstall -y flash_attn\n",
         "\n",
         "# Parameters\n",
-        "model_url = \"https://huggingface.co/turboderp/Mistral-7B-instruct-exl2\" #@param {type:\"string\"}\n",
-        "branch = \"4.0bpw\" #@param {type:\"string\"}\n",
+        "model_url = \"https://huggingface.co/TheBloke/MythoMax-L2-13B-GPTQ\" #@param {type:\"string\"}\n",
+        "branch = \"gptq-4bit-32g-actorder_True\" #@param {type:\"string\"}\n",
         "command_line_flags = \"--n-gpu-layers 128 --load-in-4bit --use_double_quant\" #@param {type:\"string\"}\n",
         "api = False #@param {type:\"boolean\"}\n",
         "\n",
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index aa0fedbf..8b133e98 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -105,6 +105,7 @@ class LlamaCppModel:
         return self.model.detokenize(ids).decode('utf-8')
 
     def get_logits(self, tokens):
+        self.model.reset()
         self.model.eval(tokens)
         logits = self.model._scores
         logits = np.expand_dims(logits, 0) # batch dim is expected
diff --git a/modules/models.py b/modules/models.py
index 19c0d903..c7dd6ccb 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -102,6 +102,7 @@ def load_model(model_name, loader=None):
     elif loader in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
         shared.settings['truncation_length'] = shared.args.n_ctx
 
+    logger.info(f"LOADER: {loader}")
     logger.info(f"TRUNCATION LENGTH: {shared.settings['truncation_length']}")
     logger.info(f"INSTRUCTION TEMPLATE: {shared.settings['instruction_template']}")
     logger.info(f"Loaded the model in {(time.time()-t0):.2f} seconds.")