Add lots of sliders

oobabooga 2023-02-07 22:08:21 -03:00
parent 53af062fa5
commit 24dc705eca
17 changed files with 413 additions and 375 deletions

View file

@@ -1,2 +1 @@
 do_sample=False,
-max_new_tokens=tokens,

View file

@@ -1,4 +1,3 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_k=100,
 top_p=0.9,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.9,
 top_k=50,
 temperature=1.39,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.5,
 top_k=0,
 temperature=0.7,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=1.0,
 top_k=0,
 temperature=0.66,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=1,
 typical_p=0.3,
 temperature=0.7,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.9,
 top_k=100,
 temperature=0.8,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=1.0,
 top_k=100,
 temperature=2,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.98,
 top_k=0,
 temperature=0.63,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.85,
 top_k=12,
 temperature=2,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=1.0,
 top_k=100,
 temperature=1.07,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=1.0,
 top_k=0,
 temperature=0.44,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.18,
 top_k=30,
 temperature=2.0,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.73,
 top_k=0,
 temperature=0.72,

View file

@@ -1,5 +1,4 @@
 do_sample=True,
-max_new_tokens=tokens,
 top_p=0.9,
 top_k=0,
 temperature=0.5,

View file

@@ -1,6 +1,5 @@
 num_beams=10,
-min_length=tokens,
-max_new_tokens=tokens,
+min_length=200,
 length_penalty =1.4,
 no_repeat_ngram_size=2,
 early_stopping=True,
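Note: the one change shared by all sixteen preset files is the removal of max_new_tokens=tokens, since the token budget is now controlled by a dedicated max_new_tokens slider in the UI instead of being baked into each preset. A preset is just a comma-separated list of keyword arguments for model.generate(); after this commit the third preset above, for example, reads in full:

do_sample=True,
top_p=0.9,
top_k=50,
temperature=1.39,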

server.py
View file

@@ -150,6 +150,40 @@ def load_model(model_name):
     print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
     return model, tokenizer

+def load_model_wrapper(selected_model):
+    global model_name, model, tokenizer
+
+    if selected_model != model_name:
+        model_name = selected_model
+        model = tokenizer = None
+        if not args.cpu:
+            gc.collect()
+            torch.cuda.empty_cache()
+        model, tokenizer = load_model(model_name)
+
+def load_preset_values(preset_menu, return_dict=False):
+    settings = {
+        'do_sample': True,
+        'temperature': 1,
+        'top_p': 1,
+        'typical_p': 1,
+        'repetition_penalty': 1,
+        'top_k': 50,
+    }
+
+    with open(Path(f'presets/{preset_menu}.txt'), 'r') as infile:
+        preset = infile.read()
+    for i in preset.split(','):
+        i = i.strip().split('=')
+        if len(i) == 2 and i[0].strip() != 'tokens':
+            settings[i[0].strip()] = eval(i[1].strip())
+
+    settings['temperature'] = min(1.99, settings['temperature'])
+
+    if return_dict:
+        return settings
+    else:
+        return settings['do_sample'], settings['temperature'], settings['top_p'], settings['typical_p'], settings['repetition_penalty'], settings['top_k']
+
 # Removes empty replies from gpt4chan outputs
 def fix_gpt4chan(s):
     for i in range(10):
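Note: load_preset_values() replaces the old approach of splicing the raw preset text into the generate() call. It parses each key=value pair, skips the now-obsolete tokens key, eval()s the value, and clamps temperature to 1.99, the slider's maximum. A standalone sketch of that parsing step, with a hard-coded preset string in place of the file read:

# Standalone sketch of load_preset_values()'s parsing logic; the preset
# string here stands in for reading presets/<name>.txt.
preset = "do_sample=True,\ntop_p=0.9,\ntop_k=50,\ntemperature=1.39,"

# Defaults mirror the dict in the diff above.
settings = {'do_sample': True, 'temperature': 1, 'top_p': 1,
            'typical_p': 1, 'repetition_penalty': 1, 'top_k': 50}

for pair in preset.split(','):
    key_value = pair.strip().split('=')
    if len(key_value) == 2 and key_value[0].strip() != 'tokens':
        # eval() turns "True" / "0.9" into Python values, as in the commit.
        settings[key_value[0].strip()] = eval(key_value[1].strip())

# Clamp temperature to the slider's maximum.
settings['temperature'] = min(1.99, settings['temperature'])
print(settings)
# {'do_sample': True, 'temperature': 1.39, 'top_p': 0.9, 'typical_p': 1,
#  'repetition_penalty': 1, 'top_k': 50}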
@@ -194,8 +228,8 @@ def formatted_outputs(reply, model_name):
     else:
         return reply

-def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None, stopping_string=None):
-    global model, tokenizer, model_name, loaded_preset, preset
+def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, eos_token=None, stopping_string=None):
+    global model_name, model, tokenizer

     original_question = question
     if not (args.chat or args.cai_chat):
@@ -203,18 +237,6 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
     if args.verbose:
         print(f"\n\n{question}\n--------------------\n")

-    if selected_model != model_name:
-        model_name = selected_model
-        model = tokenizer = None
-        if not args.cpu:
-            gc.collect()
-            torch.cuda.empty_cache()
-        model, tokenizer = load_model(model_name)
-
-    if inference_settings != loaded_preset:
-        with open(Path(f'presets/{inference_settings}.txt'), 'r') as infile:
-            preset = infile.read()
-        loaded_preset = inference_settings
-
     input_ids = encode(question, tokens)
     cuda = "" if (args.cpu or args.deepspeed) else ".cuda()"
     n = tokenizer.eos_token_id if eos_token is None else tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
@@ -231,15 +253,29 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
     else:
         stopping_criteria_list = None

-    generate_params = [f"eos_token_id={n}", "stopping_criteria=stopping_criteria_list"]
+    generate_params = [
+        f"eos_token_id={n}",
+        f"stopping_criteria=stopping_criteria_list",
+        f"do_sample={do_sample}",
+        f"temperature={temperature}",
+        f"top_p={top_p}",
+        f"typical_p={typical_p}",
+        f"repetition_penalty={repetition_penalty}",
+        f"top_k={top_k}",
+    ]
     if args.deepspeed:
         generate_params.append("synced_gpus=True")
+    if args.no_stream:
+        generate_params.append(f"max_new_tokens=tokens")
+    else:
+        generate_params.append(f"max_new_tokens=8")

     # Generate the entire reply at once
     if args.no_stream:
         t0 = time.time()
         with torch.no_grad():
-            output = eval(f"model.generate(input_ids, {','.join(generate_params)}, {preset}){cuda}")
+            output = eval(f"model.generate(input_ids, {','.join(generate_params)}){cuda}")
         reply = decode(output[0])
         t1 = time.time()
         print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output[0])-len(input_ids[0]))/(t1-t0)/8:.2f} it/s, {len(output[0])-len(input_ids[0])} tokens)")
@@ -250,10 +286,9 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
     # Generate the reply 1 token at a time
     else:
         yield formatted_outputs(original_question, model_name)
-        preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=8')
         for i in tqdm(range(tokens//8+1)):
             with torch.no_grad():
-                output = eval(f"model.generate(input_ids, {','.join(generate_params)}, {preset}){cuda}")
+                output = eval(f"model.generate(input_ids, {','.join(generate_params)}){cuda}")
             reply = decode(output[0])
             if not (args.chat or args.cai_chat):
                 reply = original_question + apply_extensions(reply[len(question):], "output")
@@ -285,6 +320,18 @@ def update_extensions_parameters(*kwargs):
         params[param] = eval(f"kwargs[{i}]")
         i += 1

+def get_available_models():
+    return sorted(set([item.replace('.pt', '') for item in map(lambda x : str(x.name), list(Path('models/').glob('*'))+list(Path('torch-dumps/').glob('*'))) if not item.endswith('.txt')]), key=str.lower)
+
+def get_available_presets():
+    return sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('presets').glob('*.txt'))), key=str.lower)
+
+def get_available_characters():
+    return ["None"] + sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('characters').glob('*.json'))), key=str.lower)
+
+def get_available_extensions():
+    return sorted(set(map(lambda x : x.parts[1], Path('extensions').glob('*/script.py'))), key=str.lower)
+
 def create_extensions_block():
     extensions_ui_elements = []
     default_values = []
@@ -307,62 +354,33 @@ def create_extensions_block():
         btn_extensions = gr.Button("Apply")
         btn_extensions.click(update_extensions_parameters, [*extensions_ui_elements], [])

-def get_available_models():
-    return sorted(set([item.replace('.pt', '') for item in map(lambda x : str(x.name), list(Path('models/').glob('*'))+list(Path('torch-dumps/').glob('*'))) if not item.endswith('.txt')]), key=str.lower)
-
-def get_available_presets():
-    return sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('presets').glob('*.txt'))), key=str.lower)
-
-def get_available_characters():
-    return ["None"] + sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('characters').glob('*.json'))), key=str.lower)
-
-def get_available_extensions():
-    return sorted(set(map(lambda x : x.parts[1], Path('extensions').glob('*/script.py'))), key=str.lower)
-
-available_models = get_available_models()
-available_presets = get_available_presets()
-available_characters = get_available_characters()
-available_extensions = get_available_extensions()
-
-extension_state = {}
-if args.extensions is not None:
-    for i,ext in enumerate(args.extensions.split(',')):
-        if ext in available_extensions:
-            print(f'Loading the extension "{ext}"... ', end='')
-            ext_string = f"extensions.{ext}.script"
-            exec(f"import {ext_string}")
-            extension_state[ext] = [True, i]
-            print(f'Ok.')
-
-# Choosing the default model
-if args.model is not None:
-    model_name = args.model
-else:
-    if len(available_models) == 0:
-        print("No models are available! Please download at least one.")
-        sys.exit(0)
-    elif len(available_models) == 1:
-        i = 0
-    else:
-        print("The following models are available:\n")
-        for i,model in enumerate(available_models):
-            print(f"{i+1}. {model}")
-        print(f"\nWhich one do you want to load? 1-{len(available_models)}\n")
-        i = int(input())-1
-        print()
-    model_name = available_models[i]
-
-model, tokenizer = load_model(model_name)
-loaded_preset = None
-
-# UI settings
-default_text = settings['prompt_gpt4chan'] if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) else settings['prompt']
-description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
-css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem} #refresh-button {flex: none; margin: 0; padding: 0; min-width: 50px; border: none; box-shadow: none; border-radius: 0} #download-label, #upload-label {min-height: 0}"
-buttons = {}
-gen_events = []
-
-if args.chat or args.cai_chat:
-    history = {'internal': [], 'visible': []}
-    character = None
+def create_settings_menus():
+    defaults = load_preset_values(settings[f'preset{suffix}'], return_dict=True)
+
+    with gr.Row():
+        with gr.Column():
+            with gr.Row():
+                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
+                create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
+        with gr.Column():
+            with gr.Row():
+                preset_menu = gr.Dropdown(choices=available_presets, value=settings[f'preset{suffix}'], label='Generation parameters preset')
+                create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
+
+    with gr.Accordion("Custom generation parameters", open=False):
+        with gr.Row():
+            with gr.Column():
+                do_sample = gr.Checkbox(value=defaults['do_sample'], label="do_sample")
+                temperature = gr.Slider(0.01, 1.99, value=defaults['temperature'], step=0.01, label="temperature")
+                top_p = gr.Slider(0.0,1.0,value=defaults['top_p'],step=0.01,label="top_p")
+            with gr.Column():
+                typical_p = gr.Slider(0.0,1.0,value=defaults['typical_p'],step=0.01,label="typical_p")
+                repetition_penalty = gr.Slider(1.0,5.0,value=defaults['repetition_penalty'],step=0.01,label="repetition_penalty")
+                top_k = gr.Slider(0,200,value=defaults['top_k'],step=1,label="top_k")
+
+    model_menu.change(load_model_wrapper, [model_menu], [])
+    preset_menu.change(load_preset_values, [preset_menu], [do_sample, temperature, top_p, typical_p, repetition_penalty, top_k])
+
+    return preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k

     # This gets the new line characters right.
     def clean_chat_message(text):
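Note: create_settings_menus() relies on a standard Gradio pattern: an event handler that returns a tuple updates one output component per element, so preset_menu.change(load_preset_values, [preset_menu], [do_sample, temperature, top_p, typical_p, repetition_penalty, top_k]) pushes all six preset values into the checkbox and five sliders at once. A minimal self-contained sketch of that pattern (the preset names and values below are invented for illustration):

import gradio as gr

# Hypothetical presets standing in for the presets/*.txt files.
PRESETS = {
    'calm': (True, 0.7, 0.9),
    'wild': (True, 1.39, 0.5),
}

def pick_preset(name):
    # Returning a tuple updates each output component in order.
    return PRESETS[name]

with gr.Blocks() as demo:
    preset_menu = gr.Dropdown(choices=list(PRESETS), value='calm', label='Preset')
    do_sample = gr.Checkbox(value=True, label='do_sample')
    temperature = gr.Slider(0.01, 1.99, value=0.7, step=0.01, label='temperature')
    top_p = gr.Slider(0.0, 1.0, value=0.9, step=0.01, label='top_p')
    preset_menu.change(pick_preset, [preset_menu], [do_sample, temperature, top_p])

# demo.launch()  # uncomment to try it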
@@ -433,14 +451,14 @@ if args.chat or args.cai_chat:
         return reply, next_character_found, substring_found

-    def chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+    def chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
         original_text = text
         text = apply_extensions(text, "input")
         question = generate_chat_prompt(text, tokens, name1, name2, context, history_size)
         history['internal'].append(['', ''])
         history['visible'].append(['', ''])
         eos_token = '\n' if check else None
-        for reply in generate_reply(question, tokens, inference_settings, selected_model, eos_token=eos_token, stopping_string=f"\n{name1}:"):
+        for reply in generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, eos_token=eos_token, stopping_string=f"\n{name1}:"):
             reply, next_character_found, substring_found = extract_message_from_reply(question, reply, name2, name1, check, extensions=True)
             history['internal'][-1] = [text, reply]
             history['visible'][-1] = [original_text, apply_extensions(reply, "output")]
@@ -450,10 +468,10 @@ if args.chat or args.cai_chat:
                 break
             yield history['visible']

-    def impersonate_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+    def impersonate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
         question = generate_chat_prompt(text, tokens, name1, name2, context, history_size, impersonate=True)
         eos_token = '\n' if check else None
-        for reply in generate_reply(question, tokens, inference_settings, selected_model, eos_token=eos_token, stopping_string=f"\n{name2}:"):
+        for reply in generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, eos_token=eos_token, stopping_string=f"\n{name2}:"):
             reply, next_character_found, substring_found = extract_message_from_reply(question, reply, name1, name2, check, extensions=False)
             if not substring_found:
                 yield apply_extensions(reply, "output")
@@ -461,19 +479,19 @@ if args.chat or args.cai_chat:
                 break
             yield apply_extensions(reply, "output")

-    def cai_chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
-        for _history in chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+    def cai_chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
+        for _history in chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
             yield generate_chat_html(_history, name1, name2, character)

-    def regenerate_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+    def regenerate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
         last = history['visible'].pop()
         history['internal'].pop()
         text = last[0]
         if args.cai_chat:
-            for i in cai_chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+            for i in cai_chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
                 yield i
         else:
-            for i in chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
+            for i in chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size):
                 yield i

     def remove_last_message(name1, name2):
@@ -660,7 +678,53 @@ if args.chat or args.cai_chat:
             img.save(Path(f'img_me.png'))
             print(f'Profile picture saved to "img_me.png"')

-    suffix = '_pygmalion' if 'pygmalion' in model_name.lower() else ''
+# Global variables
+available_models = get_available_models()
+available_presets = get_available_presets()
+available_characters = get_available_characters()
+available_extensions = get_available_extensions()
+
+extension_state = {}
+if args.extensions is not None:
+    for i,ext in enumerate(args.extensions.split(',')):
+        if ext in available_extensions:
+            print(f'Loading the extension "{ext}"... ', end='')
+            ext_string = f"extensions.{ext}.script"
+            exec(f"import {ext_string}")
+            extension_state[ext] = [True, i]
+            print(f'Ok.')
+
+# Choosing the default model
+if args.model is not None:
+    model_name = args.model
+else:
+    if len(available_models) == 0:
+        print("No models are available! Please download at least one.")
+        sys.exit(0)
+    elif len(available_models) == 1:
+        i = 0
+    else:
+        print("The following models are available:\n")
+        for i,model in enumerate(available_models):
+            print(f"{i+1}. {model}")
+        print(f"\nWhich one do you want to load? 1-{len(available_models)}\n")
+        i = int(input())-1
+        print()
+    model_name = available_models[i]
+
+model, tokenizer = load_model(model_name)
+loaded_preset = None
+
+# UI settings
+default_text = settings['prompt_gpt4chan'] if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) else settings['prompt']
+description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
+css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem} #refresh-button {flex: none; margin: 0; padding: 0; min-width: 50px; border: none; box-shadow: none; border-radius: 0} #download-label, #upload-label {min-height: 0}"
+buttons = {}
+gen_events = []
+suffix = '_pygmalion' if 'pygmalion' in model_name.lower() else ''
+history = {'internal': [], 'visible': []}
+character = None

+if args.chat or args.cai_chat:
     with gr.Blocks(css=css+".h-\[40vh\] {height: 66.67vh} .gradio-container {max-width: 800px; margin-left: auto; margin-right: auto} .w-screen {width: unset}", analytics_enabled=False) as interface:
         if args.cai_chat:
             display = gr.HTML(value=generate_chat_html([], "", "", character))
@@ -681,15 +745,11 @@ if args.chat or args.cai_chat:
         with gr.Row():
             with gr.Column():
-                length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
-                with gr.Row():
-                    model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                    create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
+                max_new_tokens = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
             with gr.Column():
                 history_size_slider = gr.Slider(minimum=settings['history_size_min'], maximum=settings['history_size_max'], step=1, label='Chat history size in prompt (0 for no limit)', value=settings['history_size'])
-                with gr.Row():
-                    preset_menu = gr.Dropdown(choices=available_presets, value=settings[f'preset{suffix}'], label='Generation parameters preset')
-                    create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
+
+        preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k = create_settings_menus()

         name1 = gr.Textbox(value=settings[f'name1{suffix}'], lines=1, label='Your name')
         name2 = gr.Textbox(value=settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
@@ -727,7 +787,7 @@ if args.chat or args.cai_chat:
         if args.extensions is not None:
            create_extensions_block()

-        input_params = [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check, history_size_slider]
+        input_params = [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, name1, name2, context, check, history_size_slider]
         if args.cai_chat:
             gen_events.append(buttons["Generate"].click(cai_chatbot_wrapper, input_params, display, show_progress=args.no_stream, api_name="textgen"))
             gen_events.append(textbox.submit(cai_chatbot_wrapper, input_params, display, show_progress=args.no_stream))
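Note: Gradio passes the components in input_params to the wrapper positionally, so this list must mirror chatbot_wrapper's signature exactly; the max_new_tokens slider appears twice on purpose, feeding both the legacy tokens parameter and the new max_new_tokens parameter:

# input_params -> chatbot_wrapper parameters (positional):
#   textbox            -> text
#   max_new_tokens     -> tokens
#   do_sample          -> do_sample
#   max_new_tokens     -> max_new_tokens   (same slider, passed again)
#   temperature..top_k -> temperature..top_k
#   name1, name2, context, check, history_size_slider -> name1..history_size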
@@ -768,25 +828,19 @@ elif args.notebook:
             markdown = gr.Markdown()
         with gr.Tab('HTML'):
             html = gr.HTML()

         buttons["Generate"] = gr.Button("Generate")
         buttons["Stop"] = gr.Button("Stop")
-        length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
-        with gr.Row():
-            with gr.Column():
-                with gr.Row():
-                    model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                    create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
-            with gr.Column():
-                with gr.Row():
-                    preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Generation parameters preset')
-                    create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
+        max_new_tokens = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
+
+        preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k = create_settings_menus()

         if args.extensions is not None:
             create_extensions_block()

-        gen_events.append(buttons["Generate"].click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream, api_name="textgen"))
-        gen_events.append(textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream))
+        gen_events.append(buttons["Generate"].click(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k], [textbox, markdown, html], show_progress=args.no_stream, api_name="textgen"))
+        gen_events.append(textbox.submit(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k], [textbox, markdown, html], show_progress=args.no_stream))
         buttons["Stop"].click(None, None, None, cancels=gen_events)

 else:
@@ -795,19 +849,15 @@ else:
     with gr.Row():
         with gr.Column():
             textbox = gr.Textbox(value=default_text, lines=15, label='Input')
-            length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
-            with gr.Row():
-                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Generation parameters preset')
-                create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
-            with gr.Row():
-                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
+            max_new_tokens = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
             buttons["Generate"] = gr.Button("Generate")
             with gr.Row():
                 with gr.Column():
                     buttons["Continue"] = gr.Button("Continue")
                 with gr.Column():
                     buttons["Stop"] = gr.Button("Stop")
+
+            preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k = create_settings_menus()

             if args.extensions is not None:
                 create_extensions_block()
@@ -819,13 +869,17 @@ else:
         with gr.Tab('HTML'):
             html = gr.HTML()

-    gen_events.append(buttons["Generate"].click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream, api_name="textgen"))
-    gen_events.append(textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream))
-    gen_events.append(buttons["Continue"].click(generate_reply, [output_textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream))
+    gen_events.append(buttons["Generate"].click(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k], [output_textbox, markdown, html], show_progress=args.no_stream, api_name="textgen"))
+    gen_events.append(textbox.submit(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k], [output_textbox, markdown, html], show_progress=args.no_stream))
+    gen_events.append(buttons["Continue"].click(generate_reply, [output_textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k], [output_textbox, markdown, html], show_progress=args.no_stream))
     buttons["Stop"].click(None, None, None, cancels=gen_events)

 interface.queue()
 if args.listen:
-    interface.launch(share=args.share, server_name="0.0.0.0", server_port=args.listen_port)
+    interface.launch(prevent_thread_lock=True, share=args.share, server_name="0.0.0.0", server_port=args.listen_port)
 else:
-    interface.launch(share=args.share, server_port=args.listen_port)
+    interface.launch(prevent_thread_lock=True, share=args.share, server_port=args.listen_port)
+
+# I think that I will need this later
+while True:
+    time.sleep(0.5)
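Note: prevent_thread_lock=True makes interface.launch() return immediately instead of blocking, which is why the script now needs the explicit keep-alive loop above; without it the process would exit and take the server down. A common variant (not in this commit) lets Ctrl+C end the loop cleanly:

import time

try:
    while True:
        time.sleep(0.5)
except KeyboardInterrupt:
    print("Shutting down.")  # exit without a traceback on Ctrl+C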