Fixing Vicuna text generation (#1579)

Author: TiagoGF, 2023-04-26 15:20:27 -04:00 (committed by GitHub)
parent d87ca8f2af
commit a941c19337
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 4 additions and 2 deletions


@@ -1,4 +1,4 @@
 name: "### Assistant:"
 your_name: "### Human:"
 context: "A chat between a human and an assistant.\n\n"
-turn_template: "<|user|> <|user-message|>\n<|bot|> <|bot-message|>\n"
+turn_template: "<|user|> <|user-message|>\n<|bot|><|bot-message|>\n"


@@ -1,4 +1,4 @@
 name: "ASSISTANT:"
 your_name: "USER:"
 context: "A chat between a user and an assistant.\n\n"
-turn_template: "<|user|> <|user-message|>\n<|bot|> <|bot-message|></s>\n"
+turn_template: "<|user|> <|user-message|>\n<|bot|><|bot-message|></s>\n"

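The only functional change in the two instruction templates above is removing the space between <|bot|> and <|bot-message|>, so the prompt sent to the model no longer ends with a stray space after the assistant prefix. As a rough illustration of why that matters, here is a minimal sketch of how a turn template could be expanded into a prompt; build_prompt, its signature, and the sample names are hypothetical, not the webui's actual code:

# Hypothetical sketch of turn-template expansion; not the actual webui implementation.
def build_prompt(turn_template, user_name, bot_name, history, new_user_input, context=""):
    prompt = context
    # Fully expand each completed user/bot exchange.
    for user_msg, bot_msg in history:
        prompt += (turn_template
                   .replace("<|user|>", user_name)
                   .replace("<|user-message|>", user_msg)
                   .replace("<|bot|>", bot_name)
                   .replace("<|bot-message|>", bot_msg))
    # For the turn being generated, stop at the bot-message placeholder so the
    # model continues right after the assistant prefix.
    last_turn = (turn_template
                 .replace("<|user|>", user_name)
                 .replace("<|user-message|>", new_user_input)
                 .replace("<|bot|>", bot_name))
    prompt += last_turn.split("<|bot-message|>")[0]
    return prompt

old_template = "<|user|> <|user-message|>\n<|bot|> <|bot-message|>\n"
new_template = "<|user|> <|user-message|>\n<|bot|><|bot-message|>\n"

# With the old template the prompt ends in "ASSISTANT: " (trailing space);
# with the new one it ends in "ASSISTANT:", avoiding the trailing space that
# this commit identifies as hurting Vicuna's generations.
print(repr(build_prompt(old_template, "USER:", "ASSISTANT:", [], "Hello")))
print(repr(build_prompt(new_template, "USER:", "ASSISTANT:", [], "Hello")))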

@@ -27,6 +27,8 @@ llama-[0-9]*b-4bit$:
 .*vicuna:
     mode: 'instruct'
     instruction_template: 'Vicuna'
+anon8231489123_vicuna-13b-GPTQ-4bit-128g:
+    instruction_template: 'Vicuna-v0'
 .*alpaca:
     mode: 'instruct'
     instruction_template: 'Alpaca'
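
The keys in this third file are patterns matched against the model name, and the new literal entry gives anon8231489123_vicuna-13b-GPTQ-4bit-128g the Vicuna-v0 instruction template while the generic .*vicuna pattern still supplies mode: 'instruct'. Below is a rough sketch of how such a lookup could behave, assuming settings from every matching pattern are merged with later entries overriding earlier ones; the merging rule and settings_for are assumptions rather than the webui's actual code:

import re

# Assumed excerpt of the mapping shown in the diff above.
model_config = {
    ".*vicuna": {"mode": "instruct", "instruction_template": "Vicuna"},
    "anon8231489123_vicuna-13b-GPTQ-4bit-128g": {"instruction_template": "Vicuna-v0"},
    ".*alpaca": {"mode": "instruct", "instruction_template": "Alpaca"},
}

def settings_for(model_name):
    # Merge every matching pattern's settings, later entries overriding earlier ones.
    merged = {}
    for pattern, settings in model_config.items():
        if re.match(pattern.lower(), model_name.lower()):
            merged.update(settings)
    return merged

print(settings_for("anon8231489123_vicuna-13b-GPTQ-4bit-128g"))
# {'mode': 'instruct', 'instruction_template': 'Vicuna-v0'}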