From b6643e5039ae210dbc54ae6aa0f4dcf90b2269a8 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 7 Jul 2023 09:11:30 -0700
Subject: [PATCH] Add decode functions to llama.cpp/exllama

---
 modules/exllama.py        | 3 +++
 modules/llamacpp_model.py | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/modules/exllama.py b/modules/exllama.py
index f685a445..ecfb10a4 100644
--- a/modules/exllama.py
+++ b/modules/exllama.py
@@ -120,3 +120,6 @@ class ExllamaModel:
 
     def encode(self, string, **kwargs):
         return self.tokenizer.encode(string)
+
+    def decode(self, string, **kwargs):
+        return self.tokenizer.decode(string)[0]
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 10a852db..4899ad99 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -65,6 +65,9 @@ class LlamaCppModel:
 
         return self.model.tokenize(string)
 
+    def decode(self, tokens):
+        return self.model.detokenize(tokens)
+
     def generate(self, prompt, state, callback=None):
         prompt = prompt if type(prompt) is str else prompt.decode()
         completion_chunks = self.model.create_completion(
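
A minimal round-trip sketch of how the new llama.cpp decode method might be
exercised, assuming `model` is a hypothetical, already-loaded LlamaCppModel
instance (model loading is outside the scope of this patch). In
llama-cpp-python, Llama.detokenize() returns raw bytes, so a caller may
still need to convert the result to str:

    # Hypothetical usage; assumes `model` is a loaded LlamaCppModel.
    tokens = model.encode("Hello, world!")  # token ids via Llama.tokenize()
    raw = model.decode(tokens)              # raw bytes via Llama.detokenize()
    text = raw.decode("utf-8") if isinstance(raw, bytes) else raw
    print(text)  # round-trips back to the original string

On the ExLlama side, decode() receives token ids despite the parameter being
named `string`; encode() returns a batched (1, n) tensor, so the tokenizer's
decode yields a list and the trailing [0] selects the single decoded string.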