Improve error handling while loading GPTQ models

oobabooga 2023-05-19 11:20:08 -03:00
parent 39dab18307
commit 9d5025f531
2 changed files with 5 additions and 2 deletions


@@ -140,7 +140,7 @@ def load_quantized(model_name):
     if shared.args.model_type is None:
         logging.error("The model could not be loaded because its type could not be inferred from its name.")
         logging.error("Please specify the type manually using the --model_type argument.")
-        return
+        return None
 
     # Select the appropriate load_quant function
     model_type = shared.args.model_type.lower()

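In Python, a bare return already produces None, so this hunk does not change behavior; it makes the failure value explicit so that callers can test for it. A minimal caller-side sketch (the try_load_quantized wrapper is hypothetical, and the import path is an assumption based on this repository's layout):

    import logging

    # Assumed import path; load_quantized is the function patched above.
    from modules.GPTQ_loader import load_quantized

    def try_load_quantized(model_name):
        # The loader now returns None explicitly when the model type
        # cannot be inferred, so a simple identity check suffices.
        model = load_quantized(model_name)
        if model is None:
            logging.error(f"Loading {model_name} failed.")
        return model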

@@ -97,6 +97,9 @@ def load_model(model_name):
         model, tokenizer = output
     else:
         model = output
-        tokenizer = load_tokenizer(model_name, model)
+        if model is None:
+            return None, None
+        else:
+            tokenizer = load_tokenizer(model_name, model)
 
     # Hijack attention with xformers
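
With both hunks applied, a failed GPTQ load propagates out of load_model as a (None, None) tuple instead of reaching load_tokenizer with a missing model. A sketch of the calling convention this enables, assuming the repository's usual shared.model, shared.tokenizer = load_model(...) pattern (the reload_model helper is hypothetical):

    # Assumed import paths based on this repository's layout.
    from modules import shared
    from modules.models import load_model

    def reload_model(model_name):
        # load_model now returns (None, None) on failure, so the tuple
        # unpacking below cannot raise, and failure is a simple check.
        shared.model, shared.tokenizer = load_model(model_name)
        if shared.model is None:
            print(f"Failed to load {model_name}.")
            return False
        return True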