Bump to latest gradio (3.47) (#4258)

oobabooga 2023-10-10 22:20:49 -03:00 committed by GitHub
parent 2b75d725e6
commit fae8062d39
GPG key ID: 4AEE18F83AFDEB23
19 changed files with 57 additions and 95 deletions

View file

@@ -92,10 +92,11 @@ div.svelte-15lo0d8 > *, div.svelte-15lo0d8 > .form > * {
.header_bar {
background-color: #f7f7f7;
margin-bottom: 19px;
display: inline !important;
overflow-x: scroll;
margin-left: calc(-1 * var(--size-4));
margin-right: calc(-1 * var(--size-4));
display: block !important;
text-wrap: nowrap;
}
.dark .header_bar {
@@ -172,18 +173,27 @@ button {
}
.file-saver {
position: fixed !important;
height: 100%;
z-index: 1000;
background-color: rgba(0, 0, 0, 0.5) !important;
margin-left: -20px;
margin-right: -20px;
}
.file-saver > :first-child {
position: fixed !important;
top: 50%;
left: 50%;
transform: translate(-50%, -50%); /* center horizontally */
width: 100%;
max-width: 500px;
background-color: var(--input-background-fill);
border: 2px solid black !important;
z-index: 1000;
border: var(--input-border-width) solid var(--input-border-color) !important;
}
.dark .file-saver {
border: 2px solid white !important;
.file-saver > :first-child > :nth-child(2) {
background: var(--block-background-fill);
}
.checkboxgroup-table label {
@@ -395,6 +405,14 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
box-shadow: none !important;
}
#chat-input > :first-child {
background-color: transparent;
}
#chat-input .progress-text {
display: none;
}
@media print {
body {
visibility: hidden;
@@ -432,7 +450,7 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
position: absolute;
height: 100%;
background-color: var(--background-fill-primary);
border: 0px;
border: 0px !important;
border-radius: 0px;
}

View file

@@ -222,6 +222,13 @@ for(i = 0; i < noBackgroundelements.length; i++) {
noBackgroundelements[i].parentNode.parentNode.parentNode.style.alignItems = "center";
}
const slimDropdownElements = document.querySelectorAll('.slim-dropdown');
for (i = 0; i < slimDropdownElements.length; i++) {
const parentNode = slimDropdownElements[i].parentNode;
parentNode.style.background = 'transparent';
parentNode.style.border = '0';
}
//------------------------------------------------
// Create the hover menu in the chat tab
// The show/hide events were adapted from:

View file

@@ -40,7 +40,7 @@ def my_open(*args, **kwargs):
with original_open(*args, **kwargs) as f:
file_contents = f.read()
file_contents = file_contents.replace(b'<script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>', b'')
file_contents = file_contents.replace(b'\t\t<script\n\t\t\tsrc="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.7/iframeResizer.contentWindow.min.js"\n\t\t\tasync\n\t\t></script>', b'')
file_contents = file_contents.replace(b'cdnjs.cloudflare.com', b'127.0.0.1')
return io.BytesIO(file_contents)
else:
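
The hunk above tracks a formatting change in gradio's bundled index.html: with 3.47 the iframe-resizer <script> tag is emitted across several lines, so the exact byte string that my_open() strips out had to be updated to match. Below is a minimal sketch of the same interception idea, written to be insensitive to how the tag happens to be formatted; it is an illustrative variant rather than the project's code, and patched_open / SCRIPT_MARKER are made-up names.

```python
import builtins
import io

original_open = builtins.open
# Any line containing this marker is treated as the CDN resizer script tag (assumption).
SCRIPT_MARKER = b'iframeResizer.contentWindow.min.js'

def patched_open(*args, **kwargs):
    path = str(args[0]) if args else ''
    if path.endswith('index.html'):
        with original_open(*args, **kwargs) as f:
            contents = f.read()
        if isinstance(contents, str):  # tolerate text-mode opens as well
            contents = contents.encode('utf-8')
        # Drop every line that references the CDN-hosted resizer script,
        # regardless of how gradio formats the tag in a given release.
        contents = b'\n'.join(
            line for line in contents.split(b'\n') if SCRIPT_MARKER not in line
        )
        return io.BytesIO(contents)
    return original_open(*args, **kwargs)

# builtins.open can then be pointed at patched_open while gradio is imported,
# and restored afterwards.
```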

View file

@@ -90,7 +90,7 @@ def load_model(model_name, loader=None):
if any((shared.args.xformers, shared.args.sdp_attention)):
llama_attn_hijack.hijack_llama_attention()
logger.info(f"Loaded the model in {(time.time()-t0):.2f} seconds.\n")
logger.info(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
return model, tokenizer

View file

@@ -80,14 +80,16 @@ def create_ui():
epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt'], info='Learning rate scheduler - defines how the learning rate changes over time. "Constant" means never change, "linear" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.', elem_classes=['slim-dropdown'])
with gr.Row():
lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt'], info='Learning rate scheduler - defines how the learning rate changes over time. "Constant" means never change, "linear" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.', elem_classes=['slim-dropdown'])
with gr.Accordion(label='Advanced Options', open=False):
with gr.Row():
with gr.Column():
lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
stop_at_loss = gr.Slider(label='Stop at loss', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached. (reasonable numbers are 1.5-1.8)')
optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
with gr.Row():
optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
with gr.Column():
warmup_steps = gr.Number(label='Warmup Steps', value=100, info='For this many steps at the start, the learning rate will be lower than normal. This helps the trainer prepare the model and precompute statistics to improve the quality of training after the start.')

View file

@@ -207,19 +207,6 @@ def save_settings(state, preset, instruction_template, extensions, show_controls
return yaml.dump(output, sort_keys=False, width=float("inf"))
class ToolButton(gr.Button, gr.components.IOComponent):
"""
Small button with single emoji as text, fits inside gradio forms
Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_block_name(self):
return "button"
def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
"""
Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
@@ -233,7 +220,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
return gr.update(**(args or {}))
refresh_button = ToolButton(value=refresh_symbol, elem_classes=elem_class, interactive=interactive)
refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)
refresh_button.click(
fn=refresh,
inputs=[],
@@ -241,11 +228,3 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
)
return refresh_button
def create_delete_button(**kwargs):
return ToolButton(value=delete_symbol, **kwargs)
def create_save_button(**kwargs):
return ToolButton(value=save_symbol, **kwargs)
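
With this hunk the ToolButton subclass (gr.Button plus gr.components.IOComponent) and its create_save_button / create_delete_button wrappers disappear, presumably because the multiple-inheritance trick no longer sits well with gradio 3.47's component internals; callers now build plain gr.Button instances directly (as the character-menu hunk below shows), and create_refresh_button does the same. A hedged reconstruction of the simplified helper, pieced together from the visible context lines — refresh_symbol is assumed to be the 🔄 emoji defined elsewhere in the module, and the outputs list is assumed to be the refreshed component:

```python
import gradio as gr

refresh_symbol = '🔄'  # assumed value; defined elsewhere in the real module

def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
    """Refresh-button pattern from AUTOMATIC1111/stable-diffusion-webui, minus the ToolButton subclass."""
    def refresh():
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args
        return gr.update(**(args or {}))

    # Call this inside a gr.Blocks() context, next to the dropdown it refreshes.
    refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)
    refresh_button.click(fn=refresh, inputs=[], outputs=[refresh_component])
    return refresh_button
```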

View file

@@ -90,7 +90,7 @@ def create_chat_settings_ui():
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
shared.gradio['character_menu'] = gr.Dropdown(value='', choices=utils.get_available_characters(), label='Character', elem_id='character-menu', info='Used in chat and chat-instruct modes.', elem_classes='slim-dropdown')
shared.gradio['character_menu'] = gr.Dropdown(value=None, choices=utils.get_available_characters(), label='Character', elem_id='character-menu', info='Used in chat and chat-instruct modes.', elem_classes='slim-dropdown')
ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button', interactive=not mu)
shared.gradio['save_character'] = gr.Button('💾', elem_classes='refresh-button', interactive=not mu)
shared.gradio['delete_character'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
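
Note the character dropdown's default changes from value='' to value=None, presumably because gradio 3.47 checks a Dropdown's initial value against its choices and an empty string is not an available character; None is the supported way to leave the selection empty. A tiny illustrative sketch (not the project's code):

```python
import gradio as gr

characters = ['Assistant', 'Example']  # illustrative choices
# The old style passed value='' even though '' is not in choices;
# with 3.47 the unselected state is expressed as value=None.
character_menu = gr.Dropdown(value=None, choices=characters, label='Character')
```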

View file

@@ -8,16 +8,16 @@ def create_ui():
mu = shared.args.multi_user
# Text file saver
with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['file_saver']:
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['file_saver']:
shared.gradio['save_filename'] = gr.Textbox(lines=1, label='File name')
shared.gradio['save_root'] = gr.Textbox(lines=1, label='File folder', info='For reference. Unchangeable.', interactive=False)
shared.gradio['save_contents'] = gr.Textbox(lines=10, label='File contents')
with gr.Row():
shared.gradio['save_confirm'] = gr.Button('Save', elem_classes="small-button", interactive=not mu)
shared.gradio['save_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
shared.gradio['save_cancel'] = gr.Button('Cancel', elem_classes="small-button")
# Text file deleter
with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['file_deleter']:
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['file_deleter']:
shared.gradio['delete_filename'] = gr.Textbox(lines=1, label='File name')
shared.gradio['delete_root'] = gr.Textbox(lines=1, label='File folder', info='For reference. Unchangeable.', interactive=False)
with gr.Row():
@@ -25,13 +25,13 @@ def create_ui():
shared.gradio['delete_cancel'] = gr.Button('Cancel', elem_classes="small-button")
# Character saver/deleter
with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
with gr.Row():
shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", interactive=not mu)
shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_deleter']:
with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_deleter']:
gr.Markdown('Confirm the character deletion?')
with gr.Row():
shared.gradio['delete_character_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop', interactive=not mu)
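
Every modal container in this file moves from gr.Box to gr.Group, presumably because gr.Box is deprecated in this gradio release, and the Save buttons gain variant='primary'. A minimal sketch of the container pattern after the change; the names (demo, file_saver, etc.) are illustrative, not the project's:

```python
import gradio as gr

with gr.Blocks() as demo:
    # gr.Group replaces the old gr.Box wrapper; the children stay the same.
    with gr.Group(visible=False, elem_classes='file-saver') as file_saver:
        filename = gr.Textbox(lines=1, label='File name')
        contents = gr.Textbox(lines=10, label='File contents')
        with gr.Row():
            confirm = gr.Button('Save', elem_classes='small-button', variant='primary')
            cancel = gr.Button('Cancel', elem_classes='small-button')
```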

View file

@@ -71,7 +71,7 @@ def natural_keys(text):
def get_available_models():
model_list = []
model_list = ['None']
for item in list(Path(f'{shared.args.model_dir}/').glob('*')):
if not item.name.endswith(('.txt', '-np', '.pt', '.json', '.yaml', '.py')) and 'llama-tokenizer' not in item.name:
model_list.append(re.sub('.pth$', '', item.name))
@@ -113,7 +113,7 @@ def get_available_extensions():
def get_available_loras():
return sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys)
return ['None'] + sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys)
def get_datasets(path: str, ext: str):
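
Both listings now lead with an explicit 'None' entry, so the model and LoRA dropdowns always have a selectable "nothing loaded" choice whose default sits inside the choices list (an assumption tied to the stricter dropdown handling noted above). A condensed sketch of the pattern; SKIP_SUFFIXES and the default lora_dir are illustrative, and the real code sorts with natural_keys rather than plain sorted():

```python
from pathlib import Path

SKIP_SUFFIXES = ('.txt', '-np', '.pt', '.json')  # illustrative subset of the real filters

def get_available_loras(lora_dir='loras'):
    names = [p.name for p in Path(lora_dir).glob('*') if not p.name.endswith(SKIP_SUFFIXES)]
    return ['None'] + sorted(names)
```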

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5; platform_system != "Darwin" and platform_machine != "x86_64"
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5; platform_system != "Darwin" and platform_machine != "x86_64"
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -1,14 +1,9 @@
aiofiles==23.1.0
fastapi==0.95.2
gradio_client==0.2.5
gradio==3.33.1
pydantic==1.10.12
accelerate==0.23.*
colorama
datasets
einops
exllamav2==0.0.5
gradio==3.47.*
markdown
numpy==1.24
optimum==1.13.1

View file

@@ -8,6 +8,7 @@ from modules.logging_colors import logger
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
os.environ['BITSANDBYTES_NOWELCOME'] = '1'
warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
with RequestBlocker():
import gradio as gr
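
The added filter silences the "Using the update method is deprecated" UserWarning that starts appearing with gradio 3.47; the existing TypedStorage filter is kept. Worth noting: the message argument of warnings.filterwarnings() is a regular expression matched against the start of the warning text, so the short phrase covers every variant of the notice. A small self-contained illustration of that behaviour (standard library only):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')  # record everything by default
    # Same filter as in the diff: matched against the *start* of the message.
    warnings.filterwarnings('ignore', category=UserWarning,
                            message='Using the update method is deprecated')
    warnings.warn('Using the update method is deprecated. Return new values instead.', UserWarning)
    warnings.warn('An unrelated warning', UserWarning)

print(len(caught))  # 1 -- only the unrelated warning was recorded
```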