diff --git a/.gitignore b/.gitignore
index e2017e49..d98b81d8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*
@@ -19,6 +21,3 @@ img_me*
 !models/place-your-models-here.txt
 !softprompts/place-your-softprompts-here.txt
 !torch-dumps/place-your-pt-models-here.txt
-
-venv/
-.venv/
diff --git a/modules/models.py b/modules/models.py
index 8fa7307e..f07e738b 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -47,17 +47,13 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(
-                Path(f"models/{shared.model_name}"),
-                low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
-            )
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
             if torch.has_mps:
                 device = torch.device('mps')
                 model = model.to(device)
             else:
                 model = model.cuda()
 
-
     # FlexGen
     elif shared.args.flexgen:
         # Initialize environment
@@ -106,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 3a7bfa6e..1d11de12 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -39,7 +39,6 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     else:
         return input_ids.cuda()
 
-
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):