Detect CodeLlama settings

This commit is contained in:
oobabooga 2023-08-25 07:06:57 -07:00
parent 52ab2a6b9e
commit 5c7d8bfdfd
3 changed files with 6 additions and 1 deletion

View file

@@ -300,3 +300,7 @@ llama-65b-gptq-3bit:
instruction_template: 'OpenOrca-Platypus2'
custom_stopping_strings: '"### Instruction:", "### Response:"'
rms_norm_eps: 5.0e-6
.*codellama:
rope_freq_base: 1000000
.*codellama.*instruct:
instruction_template: 'Llama-v2'

View file

@@ -91,7 +91,7 @@ def create_ui():
shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=16384, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
shared.gradio['alpha_value'] = gr.Slider(label='alpha_value', minimum=1, maximum=8, step=0.1, info='Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.', value=shared.args.alpha_value)
shared.gradio['rope_freq_base'] = gr.Slider(label='rope_freq_base', minimum=0, maximum=100000, step=1000, info='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63)', value=shared.args.rope_freq_base)
shared.gradio['rope_freq_base'] = gr.Slider(label='rope_freq_base', minimum=0, maximum=1000000, step=1000, info='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63)', value=shared.args.rope_freq_base)
shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=1, info='Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.', value=shared.args.compress_pos_emb)
with gr.Column():

View file

@@ -180,6 +180,7 @@ if __name__ == "__main__":
'truncation_length': shared.settings['truncation_length'],
'n_gqa': 0,
'rms_norm_eps': 0,
'rope_freq_base': 0,
}
shared.model_config.move_to_end('.*', last=False) # Move to the beginning