diff --git a/modules/models.py b/modules/models.py
index c376c808..087adada 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -151,8 +151,15 @@ def huggingface_loader(model_name):

     # Load with quantization and/or offloading
     else:
-        if not any((shared.args.cpu, torch.cuda.is_available(), torch.backends.mps.is_available())):
-            logger.warning('torch.cuda.is_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.')
+        conditions = [
+            shared.args.cpu,
+            torch.cuda.is_available(),
+            torch.backends.mps.is_available(),
+            hasattr(torch, 'xpu') and torch.xpu.is_available(),
+        ]
+
+        if not any(conditions):
+            logger.warning('No GPU has been detected by PyTorch. Falling back to CPU mode.')
             shared.args.cpu = True

     if shared.args.cpu: