Count the tokens more conservatively

This commit is contained in:
oobabooga 2023-03-04 03:10:21 -03:00
parent 736f61610b
commit c93f1fa99b


@@ -23,9 +23,9 @@ def get_max_prompt_length(tokens):
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     # These models do not have explicit tokenizers for now, so
-    # we return an estimate on the number of tokens
+    # we return an estimate for the number of tokens
     if shared.is_RWKV or shared.is_LLaMA:
-        return np.zeros((1, len(prompt)//5))
+        return np.zeros((1, len(prompt)//4))
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
     if shared.args.cpu:
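
For context, the change swaps the rough character-per-token divisor from 5 to 4, so prompts are counted as containing more tokens and truncation kicks in earlier. Below is a minimal sketch of that heuristic, assuming a plain string prompt; the function name estimate_token_count and the chars_per_token parameter are illustrative and not part of the repository's API.

import numpy as np

# Sketch of the character-count heuristic (illustrative, not the project's API).
# Models without an explicit tokenizer get a rough estimate: roughly one token
# per 4 characters instead of 5, which over-counts slightly and leaves a larger
# safety margin when the prompt is truncated.
def estimate_token_count(prompt: str, chars_per_token: int = 4) -> np.ndarray:
    # Mirror the diff's np.zeros((1, len(prompt)//4)): a dummy array whose
    # second dimension is the estimated token count.
    return np.zeros((1, len(prompt) // chars_per_token))

# Example: a 100-character prompt now counts as 25 tokens (it was 20 with //5).
print(estimate_token_count("x" * 100).shape)  # (1, 25)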