diff --git a/modules/training.py b/modules/training.py
index c98fded2..ef833679 100644
--- a/modules/training.py
+++ b/modules/training.py
@@ -483,7 +483,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         exc = traceback.format_exc()
         logger.error('Failed to reload the model.')
         print(exc)
-        return exc
+        return exc.replace('\n', '\n\n')
 
     # == Start prepping the model itself ==
     if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
@@ -518,7 +518,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
             state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin")
             set_peft_model_state_dict(lora_model, state_dict_peft)
         except:
-            yield traceback.format_exc()
+            yield traceback.format_exc().replace('\n', '\n\n')
             return
 
     if shared.args.monkey_patch:
diff --git a/server.py b/server.py
index 679c9e93..601ae33f 100644
--- a/server.py
+++ b/server.py
@@ -75,7 +75,7 @@ def load_model_wrapper(selected_model, loader, autoload=False):
             exc = traceback.format_exc()
             logger.error('Failed to load the model.')
             print(exc)
-            yield exc
+            yield exc.replace('\n', '\n\n')
 
 
 def load_lora_wrapper(selected_loras):
@@ -159,7 +159,7 @@ def download_model_wrapper(repo_id, progress=gr.Progress()):
         yield ("Done!")
     except:
         progress(1.0)
-        yield traceback.format_exc()
+        yield traceback.format_exc().replace('\n', '\n\n')
 
 
 def create_model_menus():
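
Why the replacement, presumably: these strings are yielded into Gradio output components that render markdown, where single newlines fold into spaces, so a multi-line traceback would collapse into one run-on paragraph. Doubling each newline gives every traceback line its own paragraph. A minimal standalone sketch of the same transformation follows; the helper name format_exc_for_markdown is hypothetical (the patch simply inlines .replace() at each call site):

import traceback

def format_exc_for_markdown() -> str:
    # Hypothetical helper; the patch inlines .replace() at each call site.
    # Markdown folds single newlines into spaces, so a raw traceback would
    # render as one paragraph. Doubling each newline keeps one line per
    # traceback frame in the rendered output.
    return traceback.format_exc().replace('\n', '\n\n')

try:
    1 / 0
except ZeroDivisionError:
    print(format_exc_for_markdown())

Wrapping the traceback in a preformatted block would also preserve line breaks, but doubling the newlines keeps the existing plain-markdown output path unchanged.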