From e436d69e2b47e8d09c4d111e5fdcdabb5bab84d1 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 11 Jul 2024 15:47:37 -0700
Subject: [PATCH] Add --no_xformers and --no_sdpa flags for ExllamaV2

---
 modules/exllamav2.py     | 2 ++
 modules/exllamav2_hf.py  | 2 ++
 modules/loaders.py       | 4 ++++
 modules/shared.py        | 2 ++
 modules/ui.py            | 2 ++
 modules/ui_model_menu.py | 4 +++-
 6 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/modules/exllamav2.py b/modules/exllamav2.py
index d39bc315..a770e342 100644
--- a/modules/exllamav2.py
+++ b/modules/exllamav2.py
@@ -48,6 +48,8 @@ class Exllamav2Model:
         config.scale_pos_emb = shared.args.compress_pos_emb
         config.scale_alpha_value = shared.args.alpha_value
         config.no_flash_attn = shared.args.no_flash_attn
+        config.no_xformers = shared.args.no_xformers
+        config.no_sdpa = shared.args.no_sdpa
         config.num_experts_per_token = int(shared.args.num_experts_per_token)
 
         model = ExLlamaV2(config)
diff --git a/modules/exllamav2_hf.py b/modules/exllamav2_hf.py
index 9ab9cdc7..53143d9a 100644
--- a/modules/exllamav2_hf.py
+++ b/modules/exllamav2_hf.py
@@ -176,6 +176,8 @@ class Exllamav2HF(PreTrainedModel):
         config.scale_pos_emb = shared.args.compress_pos_emb
         config.scale_alpha_value = shared.args.alpha_value
         config.no_flash_attn = shared.args.no_flash_attn
+        config.no_xformers = shared.args.no_xformers
+        config.no_sdpa = shared.args.no_sdpa
         config.num_experts_per_token = int(shared.args.num_experts_per_token)
 
         return Exllamav2HF(config)
diff --git a/modules/loaders.py b/modules/loaders.py
index 78601c17..75ed897b 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -84,6 +84,8 @@ loaders_and_params = OrderedDict({
         'max_seq_len',
         'cfg_cache',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
@@ -97,6 +99,8 @@ loaders_and_params = OrderedDict({
         'gpu_split',
         'max_seq_len',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
diff --git a/modules/shared.py b/modules/shared.py
index e04c549a..d96e3156 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -143,6 +143,8 @@ group.add_argument('--autosplit', action='store_true', help='Autosplit the model
 group.add_argument('--max_seq_len', type=int, default=2048, help='Maximum sequence length.')
 group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
 group.add_argument('--no_flash_attn', action='store_true', help='Force flash-attention to not be used.')
+group.add_argument('--no_xformers', action='store_true', help='Force xformers to not be used.')
+group.add_argument('--no_sdpa', action='store_true', help='Force Torch SDPA to not be used.')
 group.add_argument('--cache_8bit', action='store_true', help='Use 8-bit cache to save VRAM.')
 group.add_argument('--cache_4bit', action='store_true', help='Use Q4 cache to save VRAM.')
 group.add_argument('--num_experts_per_token', type=int, default=2, help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
diff --git a/modules/ui.py b/modules/ui.py
index b1c1cf6d..d77266ce 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -85,6 +85,8 @@ def list_model_elements():
         'disable_exllamav2',
         'cfg_cache',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 8a5a195c..7a85020f 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -138,7 +138,9 @@ def create_ui():
             shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
             shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
             shared.gradio['autosplit'] = gr.Checkbox(label="autosplit", value=shared.args.autosplit, info='Automatically split the model tensors across the available GPUs.')
-            shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
+            shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn)
+            shared.gradio['no_xformers'] = gr.Checkbox(label="no_xformers", value=shared.args.no_xformers)
+            shared.gradio['no_sdpa'] = gr.Checkbox(label="no_sdpa", value=shared.args.no_sdpa)
            shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Necessary to use CFG with this loader.')
             shared.gradio['cpp_runner'] = gr.Checkbox(label="cpp-runner", value=shared.args.cpp_runner, info='Enable inference with ModelRunnerCpp, which is faster than the default ModelRunner.')
             shared.gradio['num_experts_per_token'] = gr.Number(label="Number of experts per token", value=shared.args.num_experts_per_token, info='Only applies to MoE models like Mixtral.')
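
Usage sketch (not part of the patch): with this change applied, the two new switches can be combined with the existing --no_flash_attn flag to tell ExLlamaV2 which attention backends to avoid. Assuming text-generation-webui's usual server.py entry point and its existing --model and --loader options, with the model name as a placeholder, a launch command might look like:

    python server.py --model MyModel-exl2 --loader ExLlamav2_HF --max_seq_len 4096 --no_flash_attn --no_xformers --no_sdpa

The same options appear as the no_flash_attn, no_xformers, and no_sdpa checkboxes added to the model menu UI above.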