Mirror of https://github.com/oobabooga/text-generation-webui.git
Merge pull request #393 from WojtekKowaluk/mps_support

Fix for MPS support on Apple Silicon

Commit: bcd8afd906
3 changed files with 12 additions and 2 deletions
.gitignore (vendored), 2 additions:

@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/

 settings.json
 img_bot*
load_model(), first hunk:

@@ -47,7 +47,12 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
+            if torch.has_mps:
+                device = torch.device('mps')
+                model = model.to(device)
+            else:
+                model = model.cuda()

     # FlexGen
     elif shared.args.flexgen:
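The hunk above drops the unconditional .cuda() call: the model is loaded on CPU first and only then moved to MPS or CUDA. As a standalone sketch of that pattern (the "gpt2" checkpoint is only a placeholder, and torch.backends.mps.is_available() is the newer spelling of the torch.has_mps check used in the diff):

import torch
from transformers import AutoModelForCausalLM

# Placeholder checkpoint; the webui loads from models/<model_name> instead.
model = AutoModelForCausalLM.from_pretrained("gpt2", low_cpu_mem_usage=True)

# Pick the accelerator: Apple Silicon (MPS) first, then CUDA, else plain CPU.
if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

model = model.to(device)

Loading first and moving afterwards is what keeps the non-8-bit path from failing on machines without an NVIDIA GPU.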
load_model(), second hunk:

@@ -97,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True

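With this change the CPU-fallback warning only fires when the user has not forced CPU mode and neither CUDA nor MPS is detected. A rough equivalent of the new condition, with force_cpu standing in for shared.args.cpu:

import torch

force_cpu = False  # stand-in for shared.args.cpu from the webui's CLI flags

# Only warn and drop to CPU mode when no accelerator of any kind is present.
has_accelerator = torch.cuda.is_available() or torch.backends.mps.is_available()
if not force_cpu and not has_accelerator:
    print("Warning: no GPU detected. Falling back to CPU mode.")
    force_cpu = True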
encode():

@@ -33,6 +33,9 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()

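encode() has to place the prompt tensor on the same device as the model, so the new branch sends it to MPS when available instead of always calling .cuda(). A self-contained sketch of that placement, using a placeholder tokenizer and prompt:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder tokenizer
input_ids = tokenizer("Hello world", return_tensors="pt").input_ids

# Mirror the branching added in encode(): prefer MPS, then CUDA, else stay on CPU.
if torch.backends.mps.is_available():
    input_ids = input_ids.to(torch.device("mps"))
elif torch.cuda.is_available():
    input_ids = input_ids.cuda()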