Don't import PEFT unless necessary

oobabooga 2024-09-03 19:40:53 -07:00
parent c5b40eb555
commit bba5b36d33
2 changed files with 13 additions and 9 deletions
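
The change applies the standard deferred-import pattern: the PEFT imports move from module scope into the functions that actually use them, so code paths that never touch LoRA no longer pay PEFT's import cost at startup. A minimal sketch of the pattern, with a hypothetical helper name:

    # Before: module-scope import; every importer of this module pays
    # the cost of loading peft, even if LoRA features are never used.
    #
    #     from peft import PeftModel
    #
    # After: deferred import; the cost is paid on the first call only,
    # since Python caches loaded modules in sys.modules.

    def apply_lora(model, lora_path):
        # Hypothetical helper illustrating the pattern in this commit.
        from peft import PeftModel  # runs only when a LoRA is applied
        return PeftModel.from_pretrained(model, lora_path)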


@@ -1,7 +1,6 @@
 from pathlib import Path

 import torch
-from peft import PeftModel
 from transformers import is_torch_xpu_available

 import modules.shared as shared
@@ -85,6 +84,9 @@ def add_lora_autogptq(lora_names):


 def add_lora_transformers(lora_names):
+    from peft import PeftModel
+
     prior_set = set(shared.lora_names)
     added_set = set(lora_names) - prior_set
     removed_set = prior_set - set(lora_names)
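
With the import now deferred into add_lora_transformers, the saving shows up as a faster import of this module. A rough way to measure what deferral removes from startup, assuming peft is installed and not yet imported in the process:

    import importlib
    import time

    # One-time cost of importing peft: roughly what the deferred
    # import saves on startup paths that never load a LoRA.
    start = time.perf_counter()
    importlib.import_module("peft")
    print(f"import peft: {time.perf_counter() - start:.2f}s")

CPython's -X importtime flag gives a per-module breakdown of the same numbers.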


@@ -18,14 +18,6 @@ import gradio as gr
 import torch
 import transformers
 from datasets import Dataset, load_dataset
-from peft import (
-    LoraConfig,
-    get_peft_model,
-    prepare_model_for_kbit_training,
-    set_peft_model_state_dict
-)
-from peft.utils.other import \
-    TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
 from transformers import is_torch_xpu_available
 from transformers.models.auto.modeling_auto import (
     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
@@ -292,6 +284,16 @@ def calc_trainable_parameters(model):


 def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en: bool, k_proj_en: bool, o_proj_en: bool, gate_proj_en: bool, down_proj_en: bool, up_proj_en: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, overlap_len: int, newline_favor_len: int, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str):
+    from peft import (
+        LoraConfig,
+        get_peft_model,
+        prepare_model_for_kbit_training,
+        set_peft_model_state_dict
+    )
+    from peft.utils.other import \
+        TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
+
     global WANT_INTERRUPT
     WANT_INTERRUPT = False
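
After the first call, the function-local imports in do_train are cheap lookups in sys.modules, so repeated training runs pay the import cost once. The trade-off of deferral is that a missing PEFT installation now fails at call time rather than at startup; a common mitigation, not part of this commit, is to wrap the deferred import:

    def require_peft():
        # Illustrative guard: turn a call-time ImportError into an
        # actionable message. Not part of the commit.
        try:
            from peft import LoraConfig, get_peft_model
        except ImportError as exc:
            raise RuntimeError(
                "LoRA training requires the 'peft' package; "
                "install it with 'pip install peft'."
            ) from exc
        return LoraConfig, get_peft_model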