From 510a01ef46166ebbcff9943c2cd210440795147b Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 16 Nov 2023 18:03:06 -0800
Subject: [PATCH] Lint

---
 download-model.py                    |  1 -
 extensions/openai/completions.py     |  3 ++-
 extensions/openai/embeddings.py      |  1 +
 extensions/openai/images.py          |  1 +
 extensions/openai/moderations.py     |  3 ++-
 extensions/openai/script.py          | 17 +++++++++--------
 modules/one_click_installer_check.py |  1 +
 7 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/download-model.py b/download-model.py
index 65f4fa52..f8be9862 100644
--- a/download-model.py
+++ b/download-model.py
@@ -21,7 +21,6 @@ import tqdm
 from requests.adapters import HTTPAdapter
 from tqdm.contrib.concurrent import thread_map
 
-
 base = "https://huggingface.co"
 
 
diff --git a/extensions/openai/completions.py b/extensions/openai/completions.py
index 6fd533b0..99525b66 100644
--- a/extensions/openai/completions.py
+++ b/extensions/openai/completions.py
@@ -5,6 +5,8 @@ from collections import deque
 import tiktoken
 import torch
 import torch.nn.functional as F
+from transformers import LogitsProcessor, LogitsProcessorList
+
 from extensions.openai.errors import InvalidRequestError
 from extensions.openai.utils import debug_msg
 from modules import shared
@@ -15,7 +17,6 @@ from modules.chat import (
 )
 from modules.presets import load_preset_memoized
 from modules.text_generation import decode, encode, generate_reply
-from transformers import LogitsProcessor, LogitsProcessorList
 
 
 class LogitsBiasProcessor(LogitsProcessor):
diff --git a/extensions/openai/embeddings.py b/extensions/openai/embeddings.py
index 5482a3a5..fcdaab63 100644
--- a/extensions/openai/embeddings.py
+++ b/extensions/openai/embeddings.py
@@ -1,6 +1,7 @@
 import os
 
 import numpy as np
+
 from extensions.openai.errors import ServiceUnavailableError
 from extensions.openai.utils import debug_msg, float_list_to_base64
 from modules.logging_colors import logger
diff --git a/extensions/openai/images.py b/extensions/openai/images.py
index 1c8ea3a0..92bd85f0 100644
--- a/extensions/openai/images.py
+++ b/extensions/openai/images.py
@@ -2,6 +2,7 @@ import os
 import time
 
 import requests
+
 from extensions.openai.errors import ServiceUnavailableError
 
 
diff --git a/extensions/openai/moderations.py b/extensions/openai/moderations.py
index 1d2d4c1d..1ca6b8ab 100644
--- a/extensions/openai/moderations.py
+++ b/extensions/openai/moderations.py
@@ -1,9 +1,10 @@
 import time
 
 import numpy as np
-from extensions.openai.embeddings import get_embeddings
 from numpy.linalg import norm
 
+from extensions.openai.embeddings import get_embeddings
+
 moderations_disabled = False  # return 0/false
 category_embeddings = None
 antonym_embeddings = None
diff --git a/extensions/openai/script.py b/extensions/openai/script.py
index f799f970..aa4a4bf4 100644
--- a/extensions/openai/script.py
+++ b/extensions/openai/script.py
@@ -4,26 +4,27 @@ import os
 import traceback
 from threading import Thread
 
+import speech_recognition as sr
+import uvicorn
+from fastapi import Depends, FastAPI, Header, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.requests import Request
+from fastapi.responses import JSONResponse
+from pydub import AudioSegment
+from sse_starlette import EventSourceResponse
+
 import extensions.openai.completions as OAIcompletions
 import extensions.openai.embeddings as OAIembeddings
 import extensions.openai.images as OAIimages
 import extensions.openai.models as OAImodels
 import extensions.openai.moderations as OAImoderations
-import speech_recognition as sr
-import uvicorn
 from extensions.openai.errors import ServiceUnavailableError
 from extensions.openai.tokens import token_count, token_decode, token_encode
 from extensions.openai.utils import _start_cloudflared
-from fastapi import Depends, FastAPI, Header, HTTPException
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.requests import Request
-from fastapi.responses import JSONResponse
 from modules import shared
 from modules.logging_colors import logger
 from modules.models import unload_model
 from modules.text_generation import stop_everything_event
-from pydub import AudioSegment
-from sse_starlette import EventSourceResponse
 
 from .typing import (
     ChatCompletionRequest,
diff --git a/modules/one_click_installer_check.py b/modules/one_click_installer_check.py
index 1a7dd2b9..4bde8600 100644
--- a/modules/one_click_installer_check.py
+++ b/modules/one_click_installer_check.py
@@ -1,4 +1,5 @@
 from pathlib import Path
+
 from modules.logging_colors import logger
 
 if Path('../webui.py').exists():