From e52b697d5a79cd1a0dcb48a4eb0f52a38dcd66f0 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 21 Feb 2023 00:54:53 -0300
Subject: [PATCH] Add bf16 back here (the fp16 -> bf16 conversion takes a few
 seconds)

---
 convert-to-safetensors.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/convert-to-safetensors.py b/convert-to-safetensors.py
index 9d3e3f56..60770843 100644
--- a/convert-to-safetensors.py
+++ b/convert-to-safetensors.py
@@ -23,6 +23,7 @@ parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpForma
 parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.")
 parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).')
 parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).")
+parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 args = parser.parse_args()
 
 if __name__ == '__main__':
@@ -30,7 +31,7 @@ if __name__ == '__main__':
     model_name = path.name
 
     print(f"Loading {model_name}...")
-    model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.float16)
+    model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16)
     tokenizer = AutoTokenizer.from_pretrained(path)
 
     out_folder = args.output or Path(f"models/{model_name}_safetensors")
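
For reference, a minimal sketch (not part of the patch; tensor shape and the standalone `bf16` variable are illustrative) of the dtype selection this change introduces and of the fp16 -> bf16 cast the commit message refers to:

    import torch

    bf16 = True                                               # stands in for args.bf16
    dtype = torch.bfloat16 if bf16 else torch.float16         # the selection added by the patch

    weights = torch.zeros(4096, 4096, dtype=torch.float16)    # e.g. weights from an fp16 checkpoint
    weights = weights.to(dtype)                                # fp16 -> bf16 cast; takes a few seconds on full models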