From 78c0da4a18953b355a96f92a40f8df5f22292931 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 30 Mar 2023 18:04:05 -0300
Subject: [PATCH] Use the cuda branch of gptq-for-llama

Did I do this right @jllllll?

This is because the current default branch (triton) is not compatible with
Windows.
---
 install.bat | 1 +
 1 file changed, 1 insertion(+)

diff --git a/install.bat b/install.bat
index 69cdd45a..81f6d070 100644
--- a/install.bat
+++ b/install.bat
@@ -93,6 +93,7 @@ cd repositories || goto end
 if not exist GPTQ-for-LLaMa\ (
   git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git
   cd GPTQ-for-LLaMa || goto end
+  git checkout cuda
   call python -m pip install -r requirements.txt
   call python setup_cuda.py install
   if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" (