From 2f6631195a94790c6d287381e8565fd25cc312a6 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 2 Jun 2023 01:45:46 -0300
Subject: [PATCH] Add desc_act checkbox to the UI

---
 README.md     | 1 +
 modules/ui.py | 2 +-
 server.py     | 7 ++++---
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index be71e718..e6ca1cae 100644
--- a/README.md
+++ b/README.md
@@ -272,6 +272,7 @@ Optionally, you can use the following command-line flags:
 |------------------|-------------|
 | `--autogptq`     | Use AutoGPTQ for loading quantized models instead of the internal GPTQ loader. |
 | `--triton`       | Use triton. |
+| `--desc_act`     | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. |
 
 #### FlexGen
 
diff --git a/modules/ui.py b/modules/ui.py
index 628684fb..62796032 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'autogptq', 'triton', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'autogptq', 'triton', 'desc_act', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
 
diff --git a/server.py b/server.py
index 91c0a1dd..639eb10f 100644
--- a/server.py
+++ b/server.py
@@ -390,9 +390,9 @@ def create_model_menus():
 
         with gr.Column():
             with gr.Box():
-                gr.Markdown('GPTQ')
                 with gr.Row():
                     with gr.Column():
+                        gr.Markdown('GPTQ')
                         shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
                         shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
                         shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None")
@@ -400,8 +400,9 @@ def create_model_menus():
                     with gr.Column():
                         shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
                         gr.Markdown('AutoGPTQ')
-                        shared.gradio['autogptq'] = gr.Checkbox(label="autogptq", value=shared.args.autogptq, info='AutoGPTQ needs to be manually installed from source. When enabled, gpu-memory should be used for CPU offloading instead of pre_layer.')
-                        shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton, info='Use triton in AutoGPTQ.')
+                        shared.gradio['autogptq'] = gr.Checkbox(label="autogptq", value=shared.args.autogptq, info='When enabled, gpu-memory should be used for CPU offloading instead of pre_layer.')
+                        shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
+                        shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='Only used for old models without a quantize_config.json.')
 
             with gr.Box():
                 gr.Markdown('llama.cpp')
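
For context on how the new `desc_act` flag would be consumed downstream, below is a minimal sketch of an AutoGPTQ loader that passes `shared.args.desc_act` into `BaseQuantizeConfig` when the model directory lacks a quantize_config.json. The `load_quantized` function name and the path handling are illustrative assumptions, not this repository's actual loader code; `BaseQuantizeConfig` and `AutoGPTQForCausalLM.from_quantized` are AutoGPTQ's public API, and the `shared.args` flags are the ones referenced in the patch.

```python
# Hypothetical sketch, not the repository's loader: shows where --desc_act could be used.
from pathlib import Path

from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

import modules.shared as shared


def load_quantized(model_name):
    # Assumes models live under shared.args.model_dir, as elsewhere in the project.
    path_to_model = Path(f'{shared.args.model_dir}/{model_name}')

    # Models that ship their own quantize_config.json are loaded as-is; for older
    # models without one, build the config from the command-line flags, including
    # the new --desc_act checkbox/flag.
    if (path_to_model / 'quantize_config.json').exists():
        quantize_config = None
    else:
        quantize_config = BaseQuantizeConfig(
            bits=shared.args.wbits if shared.args.wbits > 0 else 4,
            group_size=shared.args.groupsize if shared.args.groupsize > 0 else -1,
            desc_act=shared.args.desc_act,
        )

    return AutoGPTQForCausalLM.from_quantized(
        str(path_to_model),
        use_triton=shared.args.triton,
        quantize_config=quantize_config,
    )
```

Leaving `quantize_config=None` lets AutoGPTQ read the model's own quantize_config.json when present, so the checkbox only affects models that lack one, matching the "Only used for old models without a quantize_config.json" hint in the UI.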