Use YAML for presets and settings

commit 00ebea0b2a (parent 2cf711f35e)
Author: oobabooga
Date: 2023-05-28 22:34:12 -03:00
42 changed files with 155 additions and 169 deletions

.gitignore (vendored)

@@ -22,6 +22,7 @@ venv/
 *.log
 settings.json
+settings.yaml
 notification.mp3
 img_bot*
 img_me*

README.md

@@ -207,7 +207,7 @@ Optionally, you can use the following command-line flags:
 | `--lora-dir LORA_DIR` | Path to directory with all the loras. |
 | `--model-menu` | Show a model menu in the terminal when the web UI is first launched. |
 | `--no-stream` | Don't stream the text output in real time. |
-| `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example. If you create a file called `settings.json`, this file will be loaded by default without the need to use the `--settings` flag. |
+| `--settings SETTINGS_FILE` | Load the default interface settings from this yaml file. See `settings-template.yaml` for an example. If you create a file called `settings.yaml`, this file will be loaded by default without the need to use the `--settings` flag. |
 | `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
 | `--verbose` | Print the prompts to the terminal. |

modules/chat.py

@@ -641,7 +641,7 @@ def save_character(name, greeting, context, picture, filename, instruct=False):
     data = {k: v for k, v in data.items() if v}  # Strip falsy
     filepath = Path(f'{folder}/{filename}.yaml')
     with filepath.open('w') as f:
-        yaml.dump(data, f)
+        yaml.dump(data, f, sort_keys=False)

     logger.info(f'Wrote {filepath}')
     path_to_img = Path(f'{folder}/{filename}.png')
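
Context for the sort_keys=False change: PyYAML's yaml.dump sorts mapping keys alphabetically by default, which would scramble the field order of saved character files. A minimal sketch of the difference (the example dict is illustrative, not the real character schema):

    import yaml

    data = {'name': 'Example', 'greeting': 'Hi!', 'context': 'A friendly assistant.'}

    print(yaml.dump(data))                   # keys re-sorted: context, greeting, name
    print(yaml.dump(data, sort_keys=False))  # insertion order kept: name, greeting, context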

modules/shared.py

@@ -65,18 +65,9 @@ settings = {
     'chat_generation_attempts_min': 1,
     'chat_generation_attempts_max': 10,
     'default_extensions': [],
-    'chat_default_extensions': ["gallery"],
-    'presets': {
-        'default': 'Default',
-        '.*(alpaca|llama|llava|vicuna)': "LLaMA-Precise",
-        '.*pygmalion': 'NovelAI-Storywriter',
-        '.*RWKV.*\.pth': 'Naive',
-        '.*moss': 'MOSS',
-    },
-    'prompts': {
-        'default': 'QA',
-        '.*(gpt4chan|gpt-4chan|4chan)': 'GPT-4chan',
-    }
+    'chat_default_extensions': ['gallery'],
+    'preset': 'LLaMA-Precise',
+    'prompt': 'QA',
 }

@@ -103,7 +94,7 @@ parser.add_argument("--model-dir", type=str, default='models/', help="Path to di
 parser.add_argument("--lora-dir", type=str, default='loras/', help="Path to directory with all the loras")
 parser.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
 parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
-parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
+parser.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
 parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
 parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')

modules/utils.py

@@ -29,7 +29,7 @@ def get_available_models():
 def get_available_presets():
-    return sorted(set((k.stem for k in Path('presets').glob('*.txt'))), key=natural_keys)
+    return sorted(set((k.stem for k in Path('presets').glob('*.yaml'))), key=natural_keys)


 def get_available_prompts():

presets/Contrastive Search.txt

@@ -1,3 +0,0 @@
-do_sample=False
-penalty_alpha=0.6
-top_k=4

presets/Contrastive Search.yaml Normal file

@@ -0,0 +1,3 @@
+do_sample: False
+penalty_alpha: 0.6
+top_k: 4

presets/Debug-deterministic.txt

@@ -1 +0,0 @@
-do_sample=False

presets/Debug-deterministic.yaml Normal file

@@ -0,0 +1 @@
+do_sample: False

presets/Default.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.5
-top_k=40
-temperature=0.7
-repetition_penalty=1.2
-typical_p=1.0

presets/Default.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.5
+top_k: 40
+temperature: 0.7
+repetition_penalty: 1.2
+typical_p: 1.0

presets/Kobold-Godlike.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.5
-top_k=0
-temperature=0.7
-repetition_penalty=1.1
-typical_p=0.19

presets/Kobold-Godlike.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.5
+top_k: 0
+temperature: 0.7
+repetition_penalty: 1.1
+typical_p: 0.19

presets/Kobold-Liminal Drift.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=1.0
-top_k=0
-temperature=0.66
-repetition_penalty=1.1
-typical_p=0.6

presets/Kobold-Liminal Drift.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 1.0
+top_k: 0
+temperature: 0.66
+repetition_penalty: 1.1
+typical_p: 0.6

presets/LLaMA-Precise.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.1
-top_k=40
-temperature=0.7
-repetition_penalty=1.18
-typical_p=1.0

presets/LLaMA-Precise.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.1
+top_k: 40
+temperature: 0.7
+repetition_penalty: 1.18
+typical_p: 1.0

presets/MOSS.txt

@@ -1,3 +0,0 @@
-temperature=0.7
-top_p=0.8
-repetition_penalty=1.02

presets/MOSS.yaml Normal file

@@ -0,0 +1,3 @@
+temperature: 0.7
+top_p: 0.8
+repetition_penalty: 1.02

presets/Naive.txt

@@ -1,4 +0,0 @@
-do_sample=True
-temperature=0.7
-top_p=0.85
-top_k=50

presets/Naive.yaml Normal file

@@ -0,0 +1,4 @@
+do_sample: true
+temperature: 0.7
+top_p: 0.85
+top_k: 50

presets/NovelAI-Best Guess.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.9
-top_k=100
-temperature=0.8
-repetition_penalty=1.15
-typical_p=1.0

presets/NovelAI-Best Guess.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.9
+top_k: 100
+temperature: 0.8
+repetition_penalty: 1.15
+typical_p: 1.0

presets/NovelAI-Decadence.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=1.0
-top_k=100
-temperature=2
-repetition_penalty=1
-typical_p=0.97

presets/NovelAI-Decadence.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 1.0
+top_k: 100
+temperature: 2
+repetition_penalty: 1
+typical_p: 0.97

presets/NovelAI-Genesis.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.98
-top_k=0
-temperature=0.63
-repetition_penalty=1.05
-typical_p=1.0

presets/NovelAI-Genesis.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.98
+top_k: 0
+temperature: 0.63
+repetition_penalty: 1.05
+typical_p: 1.0

presets/NovelAI-Lycaenidae.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.85
-top_k=12
-temperature=2
-repetition_penalty=1.15
-typical_p=1.0

presets/NovelAI-Lycaenidae.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.85
+top_k: 12
+temperature: 2
+repetition_penalty: 1.15
+typical_p: 1.0

presets/NovelAI-Ouroboros.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=1.0
-top_k=100
-temperature=1.07
-repetition_penalty=1.05
-typical_p=1.0

presets/NovelAI-Ouroboros.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 1.0
+top_k: 100
+temperature: 1.07
+repetition_penalty: 1.05
+typical_p: 1.0

presets/NovelAI-Pleasing Results.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=1.0
-top_k=0
-temperature=0.44
-repetition_penalty=1.15
-typical_p=1.0

presets/NovelAI-Pleasing Results.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 1.0
+top_k: 0
+temperature: 0.44
+repetition_penalty: 1.15
+typical_p: 1.0

presets/NovelAI-Sphinx Moth.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.18
-top_k=30
-temperature=2.0
-repetition_penalty=1.15
-typical_p=1.0

presets/NovelAI-Sphinx Moth.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.18
+top_k: 30
+temperature: 2.0
+repetition_penalty: 1.15
+typical_p: 1.0

presets/NovelAI-Storywriter.txt

@@ -1,6 +0,0 @@
-do_sample=True
-top_p=0.73
-top_k=0
-temperature=0.72
-repetition_penalty=1.1
-typical_p=1.0

presets/NovelAI-Storywriter.yaml Normal file

@@ -0,0 +1,6 @@
+do_sample: true
+top_p: 0.73
+top_k: 0
+temperature: 0.72
+repetition_penalty: 1.1
+typical_p: 1.0

presets/Verbose (Beam Search).txt

@@ -1,9 +0,0 @@
-num_beams=10
-min_length=200
-length_penalty=1.4
-no_repeat_ngram_size=2
-early_stopping=True
-temperature=0.7
-top_k=150
-top_p=0.92
-repetition_penalty=4.5

presets/Verbose (Beam Search).yaml Normal file

@@ -0,0 +1,9 @@
+num_beams: 10
+min_length: 200
+length_penalty: 1.4
+no_repeat_ngram_size: 2
+early_stopping: true
+temperature: 0.7
+top_k: 150
+top_p: 0.92
+repetition_penalty: 4.5

server.py

@@ -103,12 +103,11 @@ def load_preset_values(preset_menu, state, return_dict=False):
         'mirostat_eta': 0.1,
     }

-    with open(Path(f'presets/{preset_menu}.txt'), 'r') as infile:
-        preset = infile.read()
-    for i in preset.splitlines():
-        i = i.rstrip(',').strip().split('=')
-        if len(i) == 2 and i[0].strip() != 'tokens':
-            generate_params[i[0].strip()] = eval(i[1].strip())
+    with open(Path(f'presets/{preset_menu}.yaml'), 'r') as infile:
+        preset = yaml.safe_load(infile)
+
+    for k in preset:
+        generate_params[k] = preset[k]

     generate_params['temperature'] = min(1.99, generate_params['temperature'])
     if return_dict:
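
The parsing change above is the heart of this commit: the old loader split each preset line on "=" and ran eval() on the right-hand side, while YAML yields typed values with no code execution. A rough standalone comparison of the two formats (preset strings are inlined here for illustration):

    import yaml

    # Old format: one key=value pair per line, values passed through eval()
    legacy = "do_sample=True\ntop_p=0.5\ntop_k=40"
    params_old = {}
    for line in legacy.splitlines():
        key, value = line.rstrip(',').strip().split('=')
        params_old[key.strip()] = eval(value.strip())  # eval() will execute arbitrary code

    # New format: plain YAML parsed with safe_load, typed and safe
    params_new = yaml.safe_load("do_sample: true\ntop_p: 0.5\ntop_k: 40")

    assert params_old == params_new  # {'do_sample': True, 'top_p': 0.5, 'top_k': 40}
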
@@ -301,7 +300,7 @@ def save_model_settings(model, state):
             shared.model_config[model_regex][k] = state[k]

     with open(p, 'w') as f:
-        f.write(yaml.dump(user_config))
+        f.write(yaml.dump(user_config, sort_keys=False))

     yield (f"Settings for {model} saved to {p}")
@@ -551,11 +550,8 @@ def create_interface():

     # Defining some variables
     gen_events = []
-    default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
-    if len(shared.lora_names) == 1:
-        default_text = load_prompt(shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.lora_names[0].lower())), 'default')])
-    else:
-        default_text = load_prompt(shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')])
+    default_preset = shared.settings['preset']
+    default_text = load_prompt(shared.settings['prompt'])
     title = 'Text generation web UI'

     # Authentication variables
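
For comparison, this is roughly how the removed code resolved the default preset: the model (or LoRA) name was matched against the regex keys of settings['presets'], with 'default' as the fallback. A sketch of the old lookup (the model name is illustrative):

    import re

    presets = {
        'default': 'Default',
        '.*(alpaca|llama|llava|vicuna)': 'LLaMA-Precise',
        '.*pygmalion': 'NovelAI-Storywriter',
    }
    model_name = 'llama-13b'

    # First regex key that matches the model name wins; 'default' is the fallback
    key = next((k for k in presets if re.match(k.lower(), model_name.lower())), 'default')
    print(presets[key])  # LLaMA-Precise

The new code replaces all of this with a single flat settings key.
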
@@ -1016,16 +1012,19 @@ if __name__ == "__main__":
     settings_file = None
     if shared.args.settings is not None and Path(shared.args.settings).exists():
         settings_file = Path(shared.args.settings)
+    elif Path('settings.yaml').exists():
+        settings_file = Path('settings.yaml')
     elif Path('settings.json').exists():
         settings_file = Path('settings.json')

     if settings_file is not None:
         logger.info(f"Loading settings from {settings_file}...")
-        new_settings = json.loads(open(settings_file, 'r').read())
+        file_contents = open(settings_file, 'r', encoding='utf-8').read()
+        new_settings = json.loads(file_contents) if settings_file.suffix == ".json" else yaml.safe_load(file_contents)
         for item in new_settings:
             shared.settings[item] = new_settings[item]

-    # Set default model settings based on settings.json
+    # Set default model settings based on settings file
     shared.model_config['.*'] = {
         'wbits': 'None',
         'model_type': 'None',
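
The resolution order is now: an explicit --settings path, then settings.yaml, then the legacy settings.json, with the parser chosen by file extension. A condensed sketch of that logic as a standalone helper (the function name and the empty-dict default are assumptions for illustration):

    import json
    from pathlib import Path

    import yaml

    def load_settings(cli_path=None):
        # Resolution order: explicit --settings path, then settings.yaml, then settings.json
        candidates = [Path(cli_path)] if cli_path else []
        candidates += [Path('settings.yaml'), Path('settings.json')]
        for path in candidates:
            if path.exists():
                text = path.read_text(encoding='utf-8')
                # Path.suffix includes the leading dot, hence '.json'
                return json.loads(text) if path.suffix == '.json' else yaml.safe_load(text)
        return {}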

settings-template.json

@@ -1,47 +0,0 @@
-{
-    "dark_theme": false,
-    "autoload_model": true,
-    "max_new_tokens": 200,
-    "max_new_tokens_min": 1,
-    "max_new_tokens_max": 2000,
-    "seed": -1,
-    "character": "None",
-    "name1": "You",
-    "name2": "Assistant",
-    "context": "This is a conversation with your Assistant. It is a computer program designed to help you with various tasks such as answering questions, providing recommendations, and helping with decision making. You can ask it anything you want and it will do its best to give you accurate and relevant information.",
-    "greeting": "",
-    "turn_template": "",
-    "custom_stopping_strings": "",
-    "stop_at_newline": false,
-    "add_bos_token": true,
-    "ban_eos_token": false,
-    "skip_special_tokens": true,
-    "truncation_length": 2048,
-    "truncation_length_min": 0,
-    "truncation_length_max": 8192,
-    "mode": "chat",
-    "chat_style": "cai-chat",
-    "instruction_template": "None",
-    "chat-instruct_command": "Continue the chat dialogue below. Write a single reply for the character \"<|character|>\".\n\n<|prompt|>",
-    "chat_prompt_size": 2048,
-    "chat_prompt_size_min": 0,
-    "chat_prompt_size_max": 2048,
-    "chat_generation_attempts": 1,
-    "chat_generation_attempts_min": 1,
-    "chat_generation_attempts_max": 10,
-    "default_extensions": [],
-    "chat_default_extensions": [
-        "gallery"
-    ],
-    "presets": {
-        "default": "Default",
-        ".*(alpaca|llama|llava|vicuna)": "LLaMA-Precise",
-        ".*pygmalion": "NovelAI-Storywriter",
-        ".*RWKV.*\\.pth": "Naive",
-        ".*moss": "MOSS"
-    },
-    "prompts": {
-        "default": "QA",
-        ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan"
-    }
-}

settings-template.yaml Normal file

@@ -0,0 +1,42 @@
+dark_theme: false
+autoload_model: true
+max_new_tokens: 200
+max_new_tokens_min: 1
+max_new_tokens_max: 2000
+seed: -1
+character: None
+name1: You
+name2: Assistant
+context: This is a conversation with your Assistant. It is a computer program designed
+  to help you with various tasks such as answering questions, providing recommendations,
+  and helping with decision making. You can ask it anything you want and it will do
+  its best to give you accurate and relevant information.
+greeting: ''
+turn_template: ''
+custom_stopping_strings: ''
+stop_at_newline: false
+add_bos_token: true
+ban_eos_token: false
+skip_special_tokens: true
+truncation_length: 2048
+truncation_length_min: 0
+truncation_length_max: 8192
+mode: chat
+chat_style: cai-chat
+instruction_template: None
+chat-instruct_command: 'Continue the chat dialogue below. Write a single reply for
+  the character "<|character|>".
+
+
+  <|prompt|>'
+chat_prompt_size: 2048
+chat_prompt_size_min: 0
+chat_prompt_size_max: 2048
+chat_generation_attempts: 1
+chat_generation_attempts_min: 1
+chat_generation_attempts_max: 10
+default_extensions: []
+chat_default_extensions:
+- gallery
+preset: LLaMA-Precise
+prompt: QA
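
As a sanity check on the template, the folded multi-line entries parse back into single strings (a sketch, assuming the file above is saved as settings-template.yaml):

    import yaml

    with open('settings-template.yaml') as f:
        settings = yaml.safe_load(f)

    print(settings['preset'])                       # LLaMA-Precise
    print(repr(settings['chat-instruct_command']))  # ends with '".\n\n<|prompt|>'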