Minor changes

oobabooga 2023-03-17 22:56:46 -03:00
parent 7994b580d5
commit e26763a510
3 changed files with 4 additions and 10 deletions

.gitignore

@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*
@@ -19,6 +21,3 @@ img_me*
 !models/place-your-models-here.txt
 !softprompts/place-your-softprompts-here.txt
 !torch-dumps/place-your-pt-models-here.txt
-
-venv/
-.venv/
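Net effect: venv/ and .venv/ move from the bottom of the file up beside the other pattern entries; the set of ignored paths is unchanged. To confirm a virtual environment is still ignored after a change like this, plain git can be asked directly, e.g. git check-ignore -v .venv/bin/activate, which prints the .gitignore line responsible for the match.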

modules/models.py

@@ -47,17 +47,13 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(
-                Path(f"models/{shared.model_name}"),
-                low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
-            )
-
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
             if torch.has_mps:
                 device = torch.device('mps')
                 model = model.to(device)
             else:
                 model = model.cuda()
 
     # FlexGen
     elif shared.args.flexgen:
         # Initialize environment
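The hunk above is a pure reformat: the multi-line from_pretrained call is collapsed into one line, with the same behavior of loading in half precision (bfloat16 when --bf16 is passed, float16 otherwise) and then placing the model on MPS or CUDA. As a standalone sketch of that load-and-place pattern (the model path and bf16 flag below are placeholders, not the webui's shared config):

from pathlib import Path

import torch
from transformers import AutoModelForCausalLM

model_path = Path("models/example-model")  # placeholder, not a real model dir
use_bf16 = False                           # stands in for shared.args.bf16

# Load the weights in half precision; bf16 keeps fp32's exponent range,
# while fp16 keeps more mantissa bits.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    low_cpu_mem_usage=True,
    torch_dtype=torch.bfloat16 if use_bf16 else torch.float16,
)

# Place the model on Apple's Metal backend when present, else CUDA.
# torch.has_mps is the spelling used in this diff; newer PyTorch prefers
# torch.backends.mps.is_available().
if torch.has_mps:
    model = model.to(torch.device('mps'))
else:
    model = model.cuda()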
@@ -106,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
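The condition rewrite is behavior-preserving by De Morgan's law: not a and not b and not c has the same truth value as not any((a, b, c)). One subtlety: the tuple form evaluates all three expressions eagerly instead of short-circuiting, which is harmless here. A quick exhaustive check, purely illustrative:

from itertools import product

for a, b, c in product((False, True), repeat=3):
    assert (not a and not b and not c) == (not any((a, b, c)))
print("equivalent for all 8 Boolean combinations")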

modules/text_generation.py

@@ -39,7 +39,6 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     else:
         return input_ids.cuda()
 
-
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
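For context, this branch exists because Open Assistant (oasst) and Galactica checkpoints encode meaning in special tokens such as <|endoftext|>, so decoding must not strip them. A minimal sketch of the idea, with the fallback branch assumed rather than shown in this hunk (model_name and tokenizer are passed in explicitly here instead of coming from the webui's shared module):

import re

def decode(output_ids, model_name, tokenizer):
    # oasst/galactica models rely on special tokens, so keep them in the text.
    if re.match('(oasst|galactica)-*', model_name.lower()):
        return tokenizer.decode(output_ids, skip_special_tokens=False)
    # Assumed fallback for other models: drop special tokens from the output.
    return tokenizer.decode(output_ids, skip_special_tokens=True)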