From a717fd709d4ef5ab1a5bf97b9e59593ea7e36569 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 17 Mar 2023 11:42:25 -0300
Subject: [PATCH] Sort the imports

---
 modules/callbacks.py | 1 +
 modules/chat.py      | 3 ++-
 modules/models.py    | 3 +--
 server.py            | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/modules/callbacks.py b/modules/callbacks.py
index faa4a5e9..12a90cc3 100644
--- a/modules/callbacks.py
+++ b/modules/callbacks.py
@@ -7,6 +7,7 @@ import transformers
 
 import modules.shared as shared
 
+
 # Copied from https://github.com/PygmalionAI/gradio-ui/
 class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
 
diff --git a/modules/chat.py b/modules/chat.py
index d7202bee..3f313db2 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -12,7 +12,8 @@ import modules.extensions as extensions_module
 import modules.shared as shared
 from modules.extensions import apply_extensions
 from modules.html_generator import generate_chat_html
-from modules.text_generation import encode, generate_reply, get_max_prompt_length
+from modules.text_generation import (encode, generate_reply,
+                                     get_max_prompt_length)
 
 
 # This gets the new line characters right.
diff --git a/modules/models.py b/modules/models.py
index 6df67d3c..e4507e57 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -8,11 +8,10 @@ import numpy as np
 import torch
 import transformers
 from accelerate import infer_auto_device_map, init_empty_weights
+from peft import PeftModel
 from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                           BitsAndBytesConfig)
 
-from peft import PeftModel
-
 import modules.shared as shared
 
 transformers.logging.set_verbosity_error()
diff --git a/server.py b/server.py
index 7d5ecc74..5c21f4cd 100644
--- a/server.py
+++ b/server.py
@@ -15,9 +15,9 @@ import modules.extensions as extensions_module
 import modules.shared as shared
 import modules.ui as ui
 from modules.html_generator import generate_chat_html
+from modules.LoRA import add_lora_to_model
 from modules.models import load_model, load_soft_prompt
 from modules.text_generation import generate_reply
-from modules.LoRA import add_lora_to_model
 
 # Loading custom settings
 settings_file = None