From 0b86ac38b10fff006a7f0ed81d8787549c8fa091 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 12 Mar 2023 16:40:10 -0300 Subject: [PATCH 001/133] Initial commit --- README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..6967bc47 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# one-click-installers \ No newline at end of file From 88af917e0e40376954abb2766b73db972e78d7d8 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 12 Mar 2023 16:42:50 -0300 Subject: [PATCH 002/133] Add files via upload --- INSTRUCTIONS.txt | 13 +++++++ download-model.bat | 9 +++++ install.bat | 90 ++++++++++++++++++++++++++++++++++++++++++++++ start-webui.bat | 11 ++++++ 4 files changed, 123 insertions(+) create mode 100644 INSTRUCTIONS.txt create mode 100644 download-model.bat create mode 100644 install.bat create mode 100644 start-webui.bat diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt new file mode 100644 index 00000000..9afcc4a3 --- /dev/null +++ b/INSTRUCTIONS.txt @@ -0,0 +1,13 @@ +Thank you for downloading oobabooga/text-generation-webui. +Here is how to get it up and running: + +1. Run the "install" script to install the web UI and its requirements in this folder. +2. Run the "download" script to download a model of your choice. +3. Run the "start-webui" script to launch the web UI. + +To add flags like --chat, --notebook, --extensions, etc, edit the +"start-webui" script using a text editor and add the desired flags +to the line that says "python server.py...". + +To get the latest updates in the future, just re-run the "install" script. +This will only install the updates, so it should be much faster. diff --git a/download-model.bat b/download-model.bat new file mode 100644 index 00000000..874d359e --- /dev/null +++ b/download-model.bat @@ -0,0 +1,9 @@ +@echo off + +set INSTALL_ENV_DIR=%cd%\installer_files\env +set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% +call conda activate +cd text-generation-webui +python download-model.py + +pause diff --git a/install.bat b/install.bat new file mode 100644 index 00000000..85a96ef9 --- /dev/null +++ b/install.bat @@ -0,0 +1,90 @@ +@echo off + +@rem Based on the installer found here: https://github.com/Sygil-Dev/sygil-webui +@rem This script will install git and conda (if not found on the PATH variable) +@rem using micromamba (an 8mb static-linked single-file binary, conda replacement). +@rem This enables a user to install this project without manually installing conda and git. + +echo What is your GPU? +echo. +echo A) NVIDIA +echo B) None (I want to run in CPU mode) +echo. +set /p "gpuchoice=Input> " +set gpuchoice=%gpuchoice:~0,1% +setlocal enabledelayedexpansion +set gpuchoice=!gpuchoice:a=A! +set gpuchoice=!gpuchoice:b=B! + +if "%gpuchoice%" == "A" ( + set "PACKAGES_TO_INSTALL=torchvision torchaudio pytorch-cuda=11.7 conda git" + set "CHANNEL=-c nvidia" +) else if "%gpuchoice%" == "B" ( + set "PACKAGES_TO_INSTALL=torchvision torchaudio pytorch conda git" + set "CHANNEL=" +) else ( + echo Invalid choice. Exiting... 
+ exit +) + +set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba +set INSTALL_ENV_DIR=%cd%\installer_files\env +set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe +set REPO_URL=https://github.com/oobabooga/text-generation-webui.git +set umamba_exists=F + +@rem figure out whether git and conda needs to be installed +if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% +call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2 +if "%ERRORLEVEL%" EQU "0" set umamba_exists=T + +@rem (if necessary) install git and conda into a contained environment +if "%PACKAGES_TO_INSTALL%" NEQ "" ( + @rem download micromamba + if "%umamba_exists%" == "F" ( + echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" + + mkdir "%MAMBA_ROOT_PREFIX%" + call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" + + @rem test the mamba binary + echo Micromamba version: + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version + ) + + @rem create the installer env + if not exist "%INSTALL_ENV_DIR%" ( + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" + ) + + echo "Packages to install: %PACKAGES_TO_INSTALL%" + + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge -c pytorch %CHANNEL% %PACKAGES_TO_INSTALL% + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge -c pytorch %CHANNEL% %PACKAGES_TO_INSTALL% + + if not exist "%INSTALL_ENV_DIR%" ( + echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue." + pause + exit /b + ) +) + +set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% + +@rem clone the repository and install the pip requirements +call conda activate +if exist text-generation-webui\ ( + cd text-generation-webui + git pull +) else ( + git clone https://github.com/oobabooga/text-generation-webui.git + cd text-generation-webui +) +call python -m pip install -r requirements.txt +call python -m pip install -r extensions\google_translate\requirements.txt +call python -m pip install -r extensions\silero_tts\requirements.txt + +cd .. +del .tmp1 .tmp2 + +pause diff --git a/start-webui.bat b/start-webui.bat new file mode 100644 index 00000000..2a4189de --- /dev/null +++ b/start-webui.bat @@ -0,0 +1,11 @@ +@echo off + +@echo Starting the web UI... 
+ +set INSTALL_ENV_DIR=%cd%\installer_files\env +set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% +call conda activate +cd text-generation-webui +call python server.py --auto-devices --cai-chat + +pause From 0a4d8a5cf6ab863e15d05a37d8c2cfff56ae4eb8 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 12 Mar 2023 16:43:06 -0300 Subject: [PATCH 003/133] Delete README.md --- README.md | 1 - 1 file changed, 1 deletion(-) delete mode 100644 README.md diff --git a/README.md b/README.md deleted file mode 100644 index 6967bc47..00000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -# one-click-installers \ No newline at end of file From 88b2520fdadb64bc7b23da0b9b9b56b39a5a164b Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 13 Mar 2023 22:49:13 -0300 Subject: [PATCH 004/133] Add --upgrade to pip --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 85a96ef9..963c1ce2 100644 --- a/install.bat +++ b/install.bat @@ -80,7 +80,7 @@ if exist text-generation-webui\ ( git clone https://github.com/oobabooga/text-generation-webui.git cd text-generation-webui ) -call python -m pip install -r requirements.txt +call python -m pip install -r requirements.txt --upgrade call python -m pip install -r extensions\google_translate\requirements.txt call python -m pip install -r extensions\silero_tts\requirements.txt From 3e710994025df1d4d424415978384aecf118add8 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 13 Mar 2023 22:50:42 -0300 Subject: [PATCH 005/133] Add --upgrade everywhere --- install.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 963c1ce2..27f05f8d 100644 --- a/install.bat +++ b/install.bat @@ -81,8 +81,8 @@ if exist text-generation-webui\ ( cd text-generation-webui ) call python -m pip install -r requirements.txt --upgrade -call python -m pip install -r extensions\google_translate\requirements.txt -call python -m pip install -r extensions\silero_tts\requirements.txt +call python -m pip install -r extensions\google_translate\requirements.txt --upgrade +call python -m pip install -r extensions\silero_tts\requirements.txt --upgrade cd .. del .tmp1 .tmp2 From 92d8b3e8dd958d1056b03314a4dc9c3be55df38a Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 13 Mar 2023 22:53:06 -0300 Subject: [PATCH 006/133] Update install.bat --- install.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 27f05f8d..963c1ce2 100644 --- a/install.bat +++ b/install.bat @@ -81,8 +81,8 @@ if exist text-generation-webui\ ( cd text-generation-webui ) call python -m pip install -r requirements.txt --upgrade -call python -m pip install -r extensions\google_translate\requirements.txt --upgrade -call python -m pip install -r extensions\silero_tts\requirements.txt --upgrade +call python -m pip install -r extensions\google_translate\requirements.txt +call python -m pip install -r extensions\silero_tts\requirements.txt cd .. 
del .tmp1 .tmp2 From 4639bc84c05cf6f9f452b3a7351827215fc55477 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 15 Mar 2023 20:08:35 -0300 Subject: [PATCH 007/133] Specify torchvision version --- install.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 963c1ce2..768ebf8b 100644 --- a/install.bat +++ b/install.bat @@ -17,10 +17,10 @@ set gpuchoice=!gpuchoice:a=A! set gpuchoice=!gpuchoice:b=B! if "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=torchvision torchaudio pytorch-cuda=11.7 conda git" + set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch-cuda=11.7 conda git" set "CHANNEL=-c nvidia" ) else if "%gpuchoice%" == "B" ( - set "PACKAGES_TO_INSTALL=torchvision torchaudio pytorch conda git" + set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch conda git" set "CHANNEL=" ) else ( echo Invalid choice. Exiting... From 91371640f944cb505def354a51483d9f834bf649 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 17 Mar 2023 20:37:25 -0300 Subject: [PATCH 008/133] Use the official instructions https://pytorch.org/get-started/locally/ --- install.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 768ebf8b..8d923589 100644 --- a/install.bat +++ b/install.bat @@ -17,10 +17,10 @@ set gpuchoice=!gpuchoice:a=A! set gpuchoice=!gpuchoice:b=B! if "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch-cuda=11.7 conda git" + set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio pytorch-cuda=11.7 conda git" set "CHANNEL=-c nvidia" ) else if "%gpuchoice%" == "B" ( - set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch conda git" + set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly conda git" set "CHANNEL=" ) else ( echo Invalid choice. Exiting... From 9ed3a03d4b77591e50626818ba7238058ca6444a Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sat, 18 Mar 2023 11:25:08 -0300 Subject: [PATCH 009/133] Don't use the official instructions --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 8d923589..4dc7018c 100644 --- a/install.bat +++ b/install.bat @@ -17,7 +17,7 @@ set gpuchoice=!gpuchoice:a=A! set gpuchoice=!gpuchoice:b=B! if "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio pytorch-cuda=11.7 conda git" + set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch-cuda=11.7 conda git" set "CHANNEL=-c nvidia" ) else if "%gpuchoice%" == "B" ( set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly conda git" From a80a5465f269d3bf951478517cfc1236c73519ec Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 17:27:29 -0500 Subject: [PATCH 010/133] Update install.bat Updated Conda packages and channels to install cuda-toolkit and override 12.0 cuda packages requested by pytorch with their 11.7 equivalent. Removed Conda installation since we can use the downloaded Micromamba.exe for the same purpose with a smaller footprint. Removed redundant PATH changes. Changed %gpuchoice% comparisons to be case-insensitive. Added additional error handling and removed the use of .tmp files. Added missing extension requirements. Added GPTQ installation. Will attempt to compile locally and, if failed, will download and install a precompiled wheel. 
Incorporated fixes from one-click-bandaid. Fixed and expanded first sed command from one-click-bandaid. libbitsandbytes_cudaall.dll is used here as the cuda116.dll used by one-click-bandaid does not work on my 1080ti. This can be changed if needed. --- install.bat | 93 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 31 deletions(-) diff --git a/install.bat b/install.bat index 4dc7018c..58875ea3 100644 --- a/install.bat +++ b/install.bat @@ -1,7 +1,7 @@ @echo off @rem Based on the installer found here: https://github.com/Sygil-Dev/sygil-webui -@rem This script will install git and conda (if not found on the PATH variable) +@rem This script will install git and all dependencies @rem using micromamba (an 8mb static-linked single-file binary, conda replacement). @rem This enables a user to install this project without manually installing conda and git. @@ -12,16 +12,13 @@ echo B) None (I want to run in CPU mode) echo. set /p "gpuchoice=Input> " set gpuchoice=%gpuchoice:~0,1% -setlocal enabledelayedexpansion -set gpuchoice=!gpuchoice:a=A! -set gpuchoice=!gpuchoice:b=B! -if "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=torchvision=0.14.1 torchaudio=0.13.1 pytorch-cuda=11.7 conda git" - set "CHANNEL=-c nvidia" -) else if "%gpuchoice%" == "B" ( - set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly conda git" - set "CHANNEL=" +if /I "%gpuchoice%" == "A" ( + set "PACKAGES_TO_INSTALL=python=3.10.9 pytorch torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit conda-forge::ninja conda-forge::git" + set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia" +) else if /I "%gpuchoice%" == "B" ( + set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly git" + set "CHANNEL=-c conda-forge -c pytorch" ) else ( echo Invalid choice. Exiting... exit @@ -34,57 +31,91 @@ set REPO_URL=https://github.com/oobabooga/text-generation-webui.git set umamba_exists=F @rem figure out whether git and conda needs to be installed -if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% -call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2 +call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >nul 2>&1 if "%ERRORLEVEL%" EQU "0" set umamba_exists=T @rem (if necessary) install git and conda into a contained environment if "%PACKAGES_TO_INSTALL%" NEQ "" ( @rem download micromamba if "%umamba_exists%" == "F" ( - echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" + echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" mkdir "%MAMBA_ROOT_PREFIX%" call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" @rem test the mamba binary echo Micromamba version: - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version || ( echo Micromamba not found. && goto end ) ) + @rem create micromamba hook + if not exist "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" ( + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 + ) + + @rem activate base micromamba env + call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. 
&& goto end ) + @rem create the installer env if not exist "%INSTALL_ENV_DIR%" ( - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" + call micromamba create -y --prefix "%INSTALL_ENV_DIR%" ) + @rem activate installer env + call micromamba activate "%INSTALL_ENV_DIR%" || ( echo %INSTALL_ENV_DIR% not found. && goto end ) echo "Packages to install: %PACKAGES_TO_INSTALL%" - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge -c pytorch %CHANNEL% %PACKAGES_TO_INSTALL% - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge -c pytorch %CHANNEL% %PACKAGES_TO_INSTALL% - - if not exist "%INSTALL_ENV_DIR%" ( - echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue." - pause - exit /b - ) + call micromamba install -y %CHANNEL% %PACKAGES_TO_INSTALL% ) -set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% - @rem clone the repository and install the pip requirements -call conda activate if exist text-generation-webui\ ( cd text-generation-webui git pull ) else ( git clone https://github.com/oobabooga/text-generation-webui.git - cd text-generation-webui + cd text-generation-webui || goto end ) call python -m pip install -r requirements.txt --upgrade -call python -m pip install -r extensions\google_translate\requirements.txt -call python -m pip install -r extensions\silero_tts\requirements.txt +call python -m pip install -r extensions\api\requirements.txt --upgrade +call python -m pip install -r extensions\elevenlabs_tts\requirements.txt --upgrade +call python -m pip install -r extensions\google_translate\requirements.txt --upgrade +call python -m pip install -r extensions\silero_tts\requirements.txt --upgrade +call python -m pip install -r extensions\whisper_stt\requirements.txt --upgrade -cd .. -del .tmp1 .tmp2 +@rem skip gptq install if cpu only +if /I not "%gpuchoice%" == "A" goto bandaid +@rem download gptq and compile locally and if compile fails, install from wheel +if not exist repositories\ ( + mkdir repositories +) +cd repositories || goto end +if not exist GPTQ-for-LLaMa\ ( + git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git + cd GPTQ-for-LLaMa || goto end + git reset --hard 468c47c01b4fe370616747b6d69a2d3f48bab5e4 + pip install -r requirements.txt + call python setup_cuda.py install + if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( + echo CUDA kernal compilation failed. Will try to install from wheel. + pip install unzip + curl -LO https://github.com/oobabooga/text-generation-webui/files/11023775/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip + unzip quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip + pip install quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) + ) + cd .. +) +cd ..\.. 
+ +:bandaid +curl -LO https://github.com/DeXtmL/bitsandbytes-win-prebuilt/raw/main/libbitsandbytes_cpu.dll +curl -LO https://github.com/james-things/bitsandbytes-prebuilt-all_arch/raw/main/0.37.0/libbitsandbytes_cudaall.dll +mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" +mv libbitsandbytes_cuda116.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" +pip install sed +sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" +sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" + +:end pause From 817e6c681e98b6cf48df42c37dbfebdd715a21a8 Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 17:51:13 -0500 Subject: [PATCH 011/133] Update install.bat Added `cd /D "%~dp0"` in case the script is ran as admin. --- install.bat | 2 ++ 1 file changed, 2 insertions(+) diff --git a/install.bat b/install.bat index 58875ea3..b7f921d8 100644 --- a/install.bat +++ b/install.bat @@ -24,6 +24,8 @@ if /I "%gpuchoice%" == "A" ( exit ) +cd /D "%~dp0" + set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe From eec773b1f4c8a7dbb090d59a44ef788ab7cc9a59 Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 17:54:47 -0500 Subject: [PATCH 012/133] Update install.bat Corrected libbitsandbytes_cudaall.dll install. --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index b7f921d8..e0ffa7b3 100644 --- a/install.bat +++ b/install.bat @@ -114,7 +114,7 @@ cd ..\.. curl -LO https://github.com/DeXtmL/bitsandbytes-win-prebuilt/raw/main/libbitsandbytes_cpu.dll curl -LO https://github.com/james-things/bitsandbytes-prebuilt-all_arch/raw/main/0.37.0/libbitsandbytes_cudaall.dll mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" -mv libbitsandbytes_cuda116.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" +mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" pip install sed sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" From f0c82f06c364c5a3dfc9088100f537ec3d9f987a Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:09:44 -0500 Subject: [PATCH 013/133] Add files via upload Add script to open cmd within installation environment for easier modification. 
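For example, once this script is in place it can be run from the folder that holds installer_files, either on its own to get an interactive prompt with the environment active, or with a command appended to run inside the environment (the package name below is only a placeholder):

    micromamba-cmd.bat
    micromamba-cmd.bat python -m pip list
    micromamba-cmd.bat python -m pip install some-package
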
--- micromamba-cmd.bat | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 micromamba-cmd.bat diff --git a/micromamba-cmd.bat b/micromamba-cmd.bat new file mode 100644 index 00000000..de868fd7 --- /dev/null +++ b/micromamba-cmd.bat @@ -0,0 +1,15 @@ +@echo off + +set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba +set INSTALL_ENV_DIR=%cd%\installer_files\env + +if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 +) +call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. && goto end ) +call micromamba activate "%INSTALL_ENV_DIR%" || goto end + +cmd /k "%*" + +:end +pause \ No newline at end of file From 24870e51ed6a53d81ddb33970baae0c7c1aa7a69 Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:12:02 -0500 Subject: [PATCH 014/133] Update micromamba-cmd.bat Add cd command for admin. --- micromamba-cmd.bat | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/micromamba-cmd.bat b/micromamba-cmd.bat index de868fd7..0365fcec 100644 --- a/micromamba-cmd.bat +++ b/micromamba-cmd.bat @@ -1,5 +1,7 @@ @echo off +cd /D "%~dp0" + set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env @@ -12,4 +14,4 @@ call micromamba activate "%INSTALL_ENV_DIR%" || goto end cmd /k "%*" :end -pause \ No newline at end of file +pause From 2604e3f7ac580eeabd548d3a152ba375c61c279c Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:15:24 -0500 Subject: [PATCH 015/133] Update download-model.bat Added variables for model selection and text only mode. Updated virtual environment handling to use Micromamba. --- download-model.bat | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/download-model.bat b/download-model.bat index 874d359e..148f493a 100644 --- a/download-model.bat +++ b/download-model.bat @@ -1,9 +1,28 @@ @echo off -set INSTALL_ENV_DIR=%cd%\installer_files\env -set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% -call conda activate -cd text-generation-webui -python download-model.py +SET TextOnly=False &REM True or False for Text only mode +SET ModelName="chansung/alpaca-lora-13b" &REM HuggingFace model repo eg. "facebook/opt-1.3b" +cd /D "%~dp0" + +set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba +set INSTALL_ENV_DIR=%cd%\installer_files\env + +if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 +) +call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo MicroMamba hook not found. && goto end ) +call micromamba activate "%INSTALL_ENV_DIR%" || goto end + +cd text-generation-webui || goto end +goto %TextOnly% + +:False +call python download-model.py %ModelName% +goto end + +:True +call python download-model.py %ModelName% --text-only + +:end pause From bddbc2f89848d4cbc554c8f91a39761955b34b76 Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:19:23 -0500 Subject: [PATCH 016/133] Update start-webui.bat Updated virtual environment handling to use Micromamba. --- start-webui.bat | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/start-webui.bat b/start-webui.bat index 2a4189de..32d7833d 100644 --- a/start-webui.bat +++ b/start-webui.bat @@ -2,10 +2,19 @@ @echo Starting the web UI... 
+cd /D "%~dp0" + +set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH% -call conda activate + +if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 +) +call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. && goto end ) +call micromamba activate "%INSTALL_ENV_DIR%" || goto end cd text-generation-webui + call python server.py --auto-devices --cai-chat +:end pause From 586775ad47712c4e7117a48dbcdb3f582db57d8f Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:25:49 -0500 Subject: [PATCH 017/133] Update download-model.bat Removed redundant %ModelName% variable. --- download-model.bat | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/download-model.bat b/download-model.bat index 148f493a..e1b7442f 100644 --- a/download-model.bat +++ b/download-model.bat @@ -1,7 +1,6 @@ @echo off SET TextOnly=False &REM True or False for Text only mode -SET ModelName="chansung/alpaca-lora-13b" &REM HuggingFace model repo eg. "facebook/opt-1.3b" cd /D "%~dp0" @@ -18,11 +17,11 @@ cd text-generation-webui || goto end goto %TextOnly% :False -call python download-model.py %ModelName% +call python download-model.py goto end :True -call python download-model.py %ModelName% --text-only +call python download-model.py --text-only :end pause From fa916aa1de89b068cece26c45e3da255f6c8d021 Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 18:28:46 -0500 Subject: [PATCH 018/133] Update INSTRUCTIONS.txt Added clarification on new variable added to download-model.bat. --- INSTRUCTIONS.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt index 9afcc4a3..668d9060 100644 --- a/INSTRUCTIONS.txt +++ b/INSTRUCTIONS.txt @@ -2,7 +2,7 @@ Thank you for downloading oobabooga/text-generation-webui. Here is how to get it up and running: 1. Run the "install" script to install the web UI and its requirements in this folder. -2. Run the "download" script to download a model of your choice. +2. Run the "download" script to download a model of your choice. Change TextOnly variable at top of script to download only config files. 3. Run the "start-webui" script to launch the web UI. To add flags like --chat, --notebook, --extensions, etc, edit the From 1e260544cd41023566a32fa297c89fb91d22948c Mon Sep 17 00:00:00 2001 From: jllllll Date: Fri, 24 Mar 2023 21:25:14 -0500 Subject: [PATCH 019/133] Update install.bat Added C:\Windows\System32 to PATH to avoid issues with broken? Windows installs. --- install.bat | 2 ++ 1 file changed, 2 insertions(+) diff --git a/install.bat b/install.bat index e0ffa7b3..ac86f775 100644 --- a/install.bat +++ b/install.bat @@ -26,6 +26,8 @@ if /I "%gpuchoice%" == "A" ( cd /D "%~dp0" +set PATH=%SystemRoot%\system32;%PATH% + set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe From 2e02d42682e9a6765fb16843d516efd235537484 Mon Sep 17 00:00:00 2001 From: jllllll Date: Sat, 25 Mar 2023 01:14:29 -0500 Subject: [PATCH 020/133] Changed things around to allow Micromamba to work with paths containing spaces. 
--- download-model.bat | 5 ++--- install.bat | 19 +++++++------------ micromamba-cmd.bat | 5 ++--- start-webui.bat | 5 ++--- 4 files changed, 13 insertions(+), 21 deletions(-) diff --git a/download-model.bat b/download-model.bat index e1b7442f..af2610c6 100644 --- a/download-model.bat +++ b/download-model.bat @@ -7,11 +7,10 @@ cd /D "%~dp0" set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( +if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 ) -call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo MicroMamba hook not found. && goto end ) -call micromamba activate "%INSTALL_ENV_DIR%" || goto end +call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) cd text-generation-webui || goto end goto %TextOnly% diff --git a/install.bat b/install.bat index ac86f775..43c68949 100644 --- a/install.bat +++ b/install.bat @@ -53,25 +53,20 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" ( ) @rem create micromamba hook - if not exist "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" ( + if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 ) - @rem activate base micromamba env - call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. && goto end ) - @rem create the installer env if not exist "%INSTALL_ENV_DIR%" ( - call micromamba create -y --prefix "%INSTALL_ENV_DIR%" + echo Packages to install: %PACKAGES_TO_INSTALL% + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" %CHANNEL% %PACKAGES_TO_INSTALL% ) - @rem activate installer env - call micromamba activate "%INSTALL_ENV_DIR%" || ( echo %INSTALL_ENV_DIR% not found. && goto end ) - - echo "Packages to install: %PACKAGES_TO_INSTALL%" - - call micromamba install -y %CHANNEL% %PACKAGES_TO_INSTALL% ) +@rem activate installer env +call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) + @rem clone the repository and install the pip requirements if exist text-generation-webui\ ( cd text-generation-webui @@ -122,4 +117,4 @@ sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', No sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" :end -pause +pause \ No newline at end of file diff --git a/micromamba-cmd.bat b/micromamba-cmd.bat index 0365fcec..355e7b43 100644 --- a/micromamba-cmd.bat +++ b/micromamba-cmd.bat @@ -5,11 +5,10 @@ cd /D "%~dp0" set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( +if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 ) -call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. && goto end ) -call micromamba activate "%INSTALL_ENV_DIR%" || goto end +call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. 
&& goto end ) cmd /k "%*" diff --git a/start-webui.bat b/start-webui.bat index 32d7833d..694f07a1 100644 --- a/start-webui.bat +++ b/start-webui.bat @@ -7,11 +7,10 @@ cd /D "%~dp0" set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -if not exist "%MAMBA_ROOT_PREFIX%\Scripts\activate.bat" ( +if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 ) -call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat" || ( echo Micromamba hook not found. && goto end ) -call micromamba activate "%INSTALL_ENV_DIR%" || goto end +call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) cd text-generation-webui call python server.py --auto-devices --cai-chat From ce9a5e3b531d223b8e073ce4ec8a3858747aa9ff Mon Sep 17 00:00:00 2001 From: jllllll Date: Sat, 25 Mar 2023 02:22:02 -0500 Subject: [PATCH 021/133] Update install.bat Minor fixes --- install.bat | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/install.bat b/install.bat index 43c68949..47eba25c 100644 --- a/install.bat +++ b/install.bat @@ -94,14 +94,14 @@ if not exist GPTQ-for-LLaMa\ ( git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git cd GPTQ-for-LLaMa || goto end git reset --hard 468c47c01b4fe370616747b6d69a2d3f48bab5e4 - pip install -r requirements.txt + call python -m pip install -r requirements.txt call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( echo CUDA kernal compilation failed. Will try to install from wheel. - pip install unzip + call python -m pip install unzip curl -LO https://github.com/oobabooga/text-generation-webui/files/11023775/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip unzip quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip - pip install quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) + call python -m pip install quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) ) cd .. ) @@ -112,9 +112,8 @@ curl -LO https://github.com/DeXtmL/bitsandbytes-win-prebuilt/raw/main/libbitsand curl -LO https://github.com/james-things/bitsandbytes-prebuilt-all_arch/raw/main/0.37.0/libbitsandbytes_cudaall.dll mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" -pip install sed sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" :end -pause \ No newline at end of file +pause From 247e8e5b798e5cd8c9cc2ddf7974b97a7359b338 Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 00:24:00 -0500 Subject: [PATCH 022/133] Fix for issue in current GPTQ-for-LLaMa. 
--- install.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/install.bat b/install.bat index 47eba25c..f1f70bfc 100644 --- a/install.bat +++ b/install.bat @@ -114,6 +114,7 @@ mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" +sed -i "s/make_quant(model, layers, wbits, groupsize, faster=args.faster_kernel)/make_quant(model, layers, wbits, groupsize)/g" "%INSTALL_ENV_DIR%\..\..\text-generation-webui\repositories\GPTQ-for-LLaMa\llama.py" :end pause From 12baa0e84b04d37b61a7ac120d4f1396a89c6cbd Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 00:46:07 -0500 Subject: [PATCH 023/133] Update for latest GPTQ-for-LLaMa --- install.bat | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/install.bat b/install.bat index f1f70bfc..46b108ad 100644 --- a/install.bat +++ b/install.bat @@ -93,14 +93,11 @@ cd repositories || goto end if not exist GPTQ-for-LLaMa\ ( git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git cd GPTQ-for-LLaMa || goto end - git reset --hard 468c47c01b4fe370616747b6d69a2d3f48bab5e4 call python -m pip install -r requirements.txt call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( echo CUDA kernal compilation failed. Will try to install from wheel. - call python -m pip install unzip - curl -LO https://github.com/oobabooga/text-generation-webui/files/11023775/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip - unzip quant_cuda-0.0.0-cp310-cp310-win_amd64.whl.zip + curl -LO https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl call python -m pip install quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) ) cd .. 
From 6dcfcf4fed425f1574d65becf7001a779530946a Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 01:00:52 -0500 Subject: [PATCH 024/133] Amended fix for GPTQ-for-LLaMa Prevents breaking 3-bit support --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 46b108ad..9f7f5cac 100644 --- a/install.bat +++ b/install.bat @@ -111,7 +111,7 @@ mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" -sed -i "s/make_quant(model, layers, wbits, groupsize, faster=args.faster_kernel)/make_quant(model, layers, wbits, groupsize)/g" "%INSTALL_ENV_DIR%\..\..\text-generation-webui\repositories\GPTQ-for-LLaMa\llama.py" +sed -i "s/make_quant(model, layers, wbits, groupsize, faster=args.faster_kernel)/make_quant(model, layers, wbits, groupsize, faster=("args" in globals() and args.faster_kernel))/g" "%INSTALL_ENV_DIR%\..\..\text-generation-webui\repositories\GPTQ-for-LLaMa\llama.py" :end pause From 6f892420949882aae2d123d87d3eccb5843ccee0 Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 03:29:14 -0500 Subject: [PATCH 025/133] Remove temporary fix for GPTQ-for-LLaMa No longer necessary. --- install.bat | 1 - 1 file changed, 1 deletion(-) diff --git a/install.bat b/install.bat index 9f7f5cac..926971f3 100644 --- a/install.bat +++ b/install.bat @@ -111,7 +111,6 @@ mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" -sed -i "s/make_quant(model, layers, wbits, groupsize, faster=args.faster_kernel)/make_quant(model, layers, wbits, groupsize, faster=("args" in globals() and args.faster_kernel))/g" "%INSTALL_ENV_DIR%\..\..\text-generation-webui\repositories\GPTQ-for-LLaMa\llama.py" :end pause From bdf85ffcf97f2e0f7452936f7fef56e68a076a10 Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 21:56:16 -0500 Subject: [PATCH 026/133] Remove explicit pytorch installation Fixes an issue some people were having: https://github.com/oobabooga/text-generation-webui/issues/15 I did not experience this issue on my system. Not everyone does for some reason. 
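If the environment looks suspect after this change, one quick sanity check (a suggestion, not something the installer runs) is to open a shell with micromamba-cmd.bat and run:

    python -c "import torch; print(torch.__version__, torch.cuda.is_available())"

On a working NVIDIA install (choice A) this should print a CUDA-enabled torch version and True.
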
--- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 926971f3..68e1b72c 100644 --- a/install.bat +++ b/install.bat @@ -14,7 +14,7 @@ set /p "gpuchoice=Input> " set gpuchoice=%gpuchoice:~0,1% if /I "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=python=3.10.9 pytorch torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit conda-forge::ninja conda-forge::git" + set "PACKAGES_TO_INSTALL=python=3.10.9 torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit conda-forge::ninja conda-forge::git" set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia" ) else if /I "%gpuchoice%" == "B" ( set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly git" From cb5dff0087122f62a1aa9bcc8a2c24851c6df3f3 Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 26 Mar 2023 23:40:46 -0500 Subject: [PATCH 027/133] Update installer to use official micromamba url --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 68e1b72c..914cd7e5 100644 --- a/install.bat +++ b/install.bat @@ -30,7 +30,7 @@ set PATH=%SystemRoot%\system32;%PATH% set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe +set MICROMAMBA_DOWNLOAD_URL=https://github.com/mamba-org/micromamba-releases/releases/latest/download/micromamba-win-64 set REPO_URL=https://github.com/oobabooga/text-generation-webui.git set umamba_exists=F From ed0e593161a25756d579424cb19ace0cd9ecc537 Mon Sep 17 00:00:00 2001 From: jllllll Date: Wed, 29 Mar 2023 02:47:19 -0500 Subject: [PATCH 028/133] Change Micromamba download Changed link to previous version. This will provide a stable source for Micromamba so that new versions don't cause issues. --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 914cd7e5..69cdd45a 100644 --- a/install.bat +++ b/install.bat @@ -30,7 +30,7 @@ set PATH=%SystemRoot%\system32;%PATH% set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env -set MICROMAMBA_DOWNLOAD_URL=https://github.com/mamba-org/micromamba-releases/releases/latest/download/micromamba-win-64 +set MICROMAMBA_DOWNLOAD_URL=https://github.com/mamba-org/micromamba-releases/releases/download/1.4.0-0/micromamba-win-64 set REPO_URL=https://github.com/oobabooga/text-generation-webui.git set umamba_exists=F From 78c0da4a18953b355a96f92a40f8df5f22292931 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:04:05 -0300 Subject: [PATCH 029/133] Use the cuda branch of gptq-for-llama Did I do this right @jllllll? This is because the current default branch (triton) is not compatible with Windows. 
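An optional way to confirm the right branch was checked out after installation (assumes the default folder layout and git 2.22+ for --show-current):

    git -C text-generation-webui\repositories\GPTQ-for-LLaMa branch --show-current

It should print "cuda".
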
--- install.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/install.bat b/install.bat index 69cdd45a..81f6d070 100644 --- a/install.bat +++ b/install.bat @@ -93,6 +93,7 @@ cd repositories || goto end if not exist GPTQ-for-LLaMa\ ( git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git cd GPTQ-for-LLaMa || goto end + git checkout cuda call python -m pip install -r requirements.txt call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( From 85e4ec6e6becfb229c50688de242346d5f30c402 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 30 Mar 2023 18:22:48 -0300 Subject: [PATCH 030/133] Download the cuda branch directly --- install.bat | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 81f6d070..11eb264b 100644 --- a/install.bat +++ b/install.bat @@ -91,9 +91,8 @@ if not exist repositories\ ( ) cd repositories || goto end if not exist GPTQ-for-LLaMa\ ( - git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git + git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda cd GPTQ-for-LLaMa || goto end - git checkout cuda call python -m pip install -r requirements.txt call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( From 0b4ee14edc5ca96940447725017977d63f54a39a Mon Sep 17 00:00:00 2001 From: jllllll Date: Thu, 30 Mar 2023 20:04:16 -0500 Subject: [PATCH 031/133] Attempt to Improve Reliability Have pip directly download and install backup GPTQ wheel instead of first downloading through curl. Install bitsandbytes from wheel compiled for Windows from modified source. Add clarification of minor, intermittent issue to instructions. Add system32 folder to end of PATH rather than beginning. Add warning when installed under a path containing spaces. --- INSTRUCTIONS.txt | 8 +++++++- install.bat | 20 ++++++-------------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt index 668d9060..cb655f60 100644 --- a/INSTRUCTIONS.txt +++ b/INSTRUCTIONS.txt @@ -2,7 +2,7 @@ Thank you for downloading oobabooga/text-generation-webui. Here is how to get it up and running: 1. Run the "install" script to install the web UI and its requirements in this folder. -2. Run the "download" script to download a model of your choice. Change TextOnly variable at top of script to download only config files. +2. Run the "download-model" script to download a model of your choice. Change TextOnly variable at top of script to download only config files. 3. Run the "start-webui" script to launch the web UI. To add flags like --chat, --notebook, --extensions, etc, edit the @@ -11,3 +11,9 @@ to the line that says "python server.py...". To get the latest updates in the future, just re-run the "install" script. This will only install the updates, so it should be much faster. +May need to delete '\text-generation-webui\repositories\GPTQ-for-LLaMa' folder if GPTQ-for-LLaMa needs to be updated. + +You can open a command-line attached to the virtual environment by running the "micromamba-cmd" script. + +This installer uses a custom-built Windows-compatible version of bitsandbytes. Source: https://github.com/acpopescu/bitsandbytes/tree/cmake_windows +When starting the webui, you may encounter an error referencing cuda 116. Starting the webui again should allow bitsandbytes to detect the correct version. 
\ No newline at end of file diff --git a/install.bat b/install.bat index 11eb264b..d1e54a0e 100644 --- a/install.bat +++ b/install.bat @@ -5,6 +5,8 @@ @rem using micromamba (an 8mb static-linked single-file binary, conda replacement). @rem This enables a user to install this project without manually installing conda and git. +&& echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces.&& echo. + echo What is your GPU? echo. echo A) NVIDIA @@ -26,7 +28,7 @@ if /I "%gpuchoice%" == "A" ( cd /D "%~dp0" -set PATH=%SystemRoot%\system32;%PATH% +set PATH=%PATH%;%SystemRoot%\system32 set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba set INSTALL_ENV_DIR=%cd%\installer_files\env @@ -45,7 +47,7 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" ( echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" mkdir "%MAMBA_ROOT_PREFIX%" - call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" + call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo Micromamba failed to download. && goto end ) @rem test the mamba binary echo Micromamba version: @@ -73,6 +75,7 @@ if exist text-generation-webui\ ( git pull ) else ( git clone https://github.com/oobabooga/text-generation-webui.git + call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl cd text-generation-webui || goto end ) call python -m pip install -r requirements.txt --upgrade @@ -97,20 +100,9 @@ if not exist GPTQ-for-LLaMa\ ( call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( echo CUDA kernal compilation failed. Will try to install from wheel. - curl -LO https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl - call python -m pip install quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) + call python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) ) - cd .. ) -cd ..\.. 
- -:bandaid -curl -LO https://github.com/DeXtmL/bitsandbytes-win-prebuilt/raw/main/libbitsandbytes_cpu.dll -curl -LO https://github.com/james-things/bitsandbytes-prebuilt-all_arch/raw/main/0.37.0/libbitsandbytes_cudaall.dll -mv libbitsandbytes_cpu.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" -mv libbitsandbytes_cudaall.dll "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes" -sed -i "s/if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None/if torch.cuda.is_available(): return 'libbitsandbytes_cudaall.dll', None, None, None, None\n else: return 'libbitsandbytes_cpu.dll', None, None, None, None/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" -sed -i "s/ct.cdll.LoadLibrary(binary_path)/ct.cdll.LoadLibrary(str(binary_path))/g" "%INSTALL_ENV_DIR%\lib\site-packages\bitsandbytes\cuda_setup\main.py" :end pause From 172035d2e1e534f62408afa892041c5720856b75 Mon Sep 17 00:00:00 2001 From: jllllll Date: Thu, 30 Mar 2023 20:44:56 -0500 Subject: [PATCH 032/133] Minor Correction --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index d1e54a0e..275860eb 100644 --- a/install.bat +++ b/install.bat @@ -5,7 +5,7 @@ @rem using micromamba (an 8mb static-linked single-file binary, conda replacement). @rem This enables a user to install this project without manually installing conda and git. -&& echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces.&& echo. +echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces.&& echo. echo What is your GPU? echo. From e4e3c9095d54c9b14bb3e7574fcf9343c33fbdea Mon Sep 17 00:00:00 2001 From: jllllll Date: Thu, 30 Mar 2023 20:48:40 -0500 Subject: [PATCH 033/133] Add warning for long paths --- install.bat | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 275860eb..cdda3f87 100644 --- a/install.bat +++ b/install.bat @@ -5,7 +5,8 @@ @rem using micromamba (an 8mb static-linked single-file binary, conda replacement). @rem This enables a user to install this project without manually installing conda and git. -echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces.&& echo. +echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces. +echo May also have issues with long paths.&& echo. echo What is your GPU? echo. 
From b704fe7878a1ff84c25e276d6ac7fec1de54c049 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 2 Apr 2023 01:10:22 -0300 Subject: [PATCH 034/133] Use my fork of GPTQ-for-LLaMa for stability --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index cdda3f87..3508e5b7 100644 --- a/install.bat +++ b/install.bat @@ -95,7 +95,7 @@ if not exist repositories\ ( ) cd repositories || goto end if not exist GPTQ-for-LLaMa\ ( - git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda + git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda cd GPTQ-for-LLaMa || goto end call python -m pip install -r requirements.txt call python setup_cuda.py install From e3c348e42bb17adf79a5efd1d3522d8a0782ccc2 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 2 Apr 2023 01:11:05 -0300 Subject: [PATCH 035/133] Add .git --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 3508e5b7..45df4b4f 100644 --- a/install.bat +++ b/install.bat @@ -95,7 +95,7 @@ if not exist repositories\ ( ) cd repositories || goto end if not exist GPTQ-for-LLaMa\ ( - git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda + git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda cd GPTQ-for-LLaMa || goto end call python -m pip install -r requirements.txt call python setup_cuda.py install From c86d3e9c74f293efac6c8464c60639ba0a0a313e Mon Sep 17 00:00:00 2001 From: jllllll Date: Sun, 2 Apr 2023 21:28:04 -0500 Subject: [PATCH 036/133] Add -k flag to curl command Disables SSL certificate verification which was causing curl to fail on some systems. https://github.com/oobabooga/text-generation-webui/issues/644#issuecomment-1493518391 --- install.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 45df4b4f..4c185c89 100644 --- a/install.bat +++ b/install.bat @@ -48,7 +48,7 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" ( echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" mkdir "%MAMBA_ROOT_PREFIX%" - call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo Micromamba failed to download. && goto end ) + call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo Micromamba failed to download. && goto end ) @rem test the mamba binary echo Micromamba version: From 5aaf771c7de3c4dfca90cbecdcd1fc01d2a32b74 Mon Sep 17 00:00:00 2001 From: jllllll Date: Tue, 4 Apr 2023 12:31:26 -0500 Subject: [PATCH 037/133] Add additional sanity check Add environment creation error Improve error visibility --- install.bat | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/install.bat b/install.bat index 4c185c89..6a8c1f11 100644 --- a/install.bat +++ b/install.bat @@ -48,11 +48,11 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" ( echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" mkdir "%MAMBA_ROOT_PREFIX%" - call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo Micromamba failed to download. && goto end ) + call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo. && echo Micromamba failed to download. 
&& goto end ) @rem test the mamba binary echo Micromamba version: - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version || ( echo Micromamba not found. && goto end ) + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version || ( echo. && echo Micromamba not found. && goto end ) ) @rem create micromamba hook @@ -63,12 +63,15 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" ( @rem create the installer env if not exist "%INSTALL_ENV_DIR%" ( echo Packages to install: %PACKAGES_TO_INSTALL% - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" %CHANNEL% %PACKAGES_TO_INSTALL% + call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" %CHANNEL% %PACKAGES_TO_INSTALL% || ( echo. && echo Conda environment creation failed. && goto end ) ) ) +@rem check if conda environment was actually created +if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end ) + @rem activate installer env -call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) +call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo MicroMamba hook not found. && goto end ) @rem clone the repository and install the pip requirements if exist text-generation-webui\ ( @@ -101,7 +104,7 @@ if not exist GPTQ-for-LLaMa\ ( call python setup_cuda.py install if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( echo CUDA kernal compilation failed. Will try to install from wheel. - call python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end ) + call python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo. && echo Wheel installation failed. && goto end ) ) ) From 1e656bef253d566da620fa2a16c6e7083e2b60ed Mon Sep 17 00:00:00 2001 From: jllllll Date: Wed, 5 Apr 2023 16:52:05 -0500 Subject: [PATCH 038/133] Specifically target cuda 11.7 ver. of torch 2.0.0 Move conda-forge channel to global list of channels Hopefully prevents missing or incorrect packages --- install.bat | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 6a8c1f11..6748a848 100644 --- a/install.bat +++ b/install.bat @@ -17,8 +17,8 @@ set /p "gpuchoice=Input> " set gpuchoice=%gpuchoice:~0,1% if /I "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=python=3.10.9 torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit conda-forge::ninja conda-forge::git" - set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia" + set "PACKAGES_TO_INSTALL=python=3.10.9 pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git" + set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia -c conda-forge" ) else if /I "%gpuchoice%" == "B" ( set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly git" set "CHANNEL=-c conda-forge -c pytorch" From 0818bc93ad2aec65bfba6d369d299a736134d986 Mon Sep 17 00:00:00 2001 From: Lou Bernardi Date: Sat, 8 Apr 2023 22:44:55 -0400 Subject: [PATCH 039/133] Add working llamaa-cpp-python install from wheel. 
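A minimal post-install check (optional) is to open a shell with micromamba-cmd.bat and confirm the wheel imports:

    python -c "import llama_cpp"

If that prints nothing and exits cleanly, the module installed correctly.
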
--- install.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/install.bat b/install.bat index 6748a848..90ac0263 100644 --- a/install.bat +++ b/install.bat @@ -80,6 +80,7 @@ if exist text-generation-webui\ ( ) else ( git clone https://github.com/oobabooga/text-generation-webui.git call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl + call python -m pip install https://github.com/abetlen/llama-cpp-python/raw/main/wheels/llama_cpp_python-0.1.26-cp310-cp310-win_amd64.whl --no-deps cd text-generation-webui || goto end ) call python -m pip install -r requirements.txt --upgrade From c3e1a58cb3a77a7bf875936e6978eb832a4408df Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sun, 9 Apr 2023 21:46:54 -0500 Subject: [PATCH 040/133] Correct llama-cpp-python wheel link (#17) --- install.bat | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/install.bat b/install.bat index 90ac0263..0c9749ca 100644 --- a/install.bat +++ b/install.bat @@ -8,6 +8,9 @@ echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces. echo May also have issues with long paths.&& echo. +pause +cls + echo What is your GPU? echo. echo A) NVIDIA @@ -80,7 +83,7 @@ if exist text-generation-webui\ ( ) else ( git clone https://github.com/oobabooga/text-generation-webui.git call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl - call python -m pip install https://github.com/abetlen/llama-cpp-python/raw/main/wheels/llama_cpp_python-0.1.26-cp310-cp310-win_amd64.whl --no-deps + call python -m pip install https://github.com/Loufe/llama-cpp-python/raw/main/wheels/llama_cpp_python-0.1.26-cp310-cp310-win_amd64.whl --no-deps cd text-generation-webui || goto end ) call python -m pip install -r requirements.txt --upgrade From 254609daca1bc8e110f01e3d81f1c9b5ac60c00f Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Mon, 10 Apr 2023 08:48:56 -0500 Subject: [PATCH 041/133] Update llama-cpp-python link to official wheel (#19) --- install.bat | 1 - start-webui.bat | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/install.bat b/install.bat index 0c9749ca..8f904889 100644 --- a/install.bat +++ b/install.bat @@ -83,7 +83,6 @@ if exist text-generation-webui\ ( ) else ( git clone https://github.com/oobabooga/text-generation-webui.git call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl - call python -m pip install https://github.com/Loufe/llama-cpp-python/raw/main/wheels/llama_cpp_python-0.1.26-cp310-cp310-win_amd64.whl --no-deps cd text-generation-webui || goto end ) call python -m pip install -r requirements.txt --upgrade diff --git a/start-webui.bat b/start-webui.bat index 694f07a1..23b5b8c3 100644 --- a/start-webui.bat +++ b/start-webui.bat @@ -13,7 +13,7 @@ if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. 
&& goto end ) cd text-generation-webui -call python server.py --auto-devices --cai-chat +call python server.py --auto-devices --chat :end pause From 6d2c72b593f2b4c97e38189341c1755407bcb22c Mon Sep 17 00:00:00 2001 From: Blake Wyatt <894305+xNul@users.noreply.github.com> Date: Tue, 18 Apr 2023 01:23:09 -0400 Subject: [PATCH 042/133] Add support for MacOS, Linux, and WSL (#21) * Initial commit * Initial commit with new code * Add comments * Move GPTQ out of if * Fix install on Arch Linux * Fix case where install was aborted If the install was aborted before a model was downloaded, webui wouldn't run. * Update start_windows.bat Add necessary flags to Miniconda installer Disable Start Menu shortcut creation Disable ssl on Conda Change Python version to latest 3.10, I've noticed that explicitly specifying 3.10.9 can break the included Python installation * Update bitsandbytes wheel link to 0.38.1 Disable ssl on Conda * Add check for spaces in path Installation of Miniconda will fail in this case * Mirror changes to mac and linux scripts * Start with model-menu * Add updaters * Fix line endings * Add check for path with spaces * Fix one-click updating * Fix one-click updating * Clean up update scripts * Add environment scripts --------- Co-authored-by: jllllll <3887729+jllllll@users.noreply.github.com> Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com> --- INSTRUCTIONS.txt | 19 ----- download-model.bat | 26 ------ environment_linux.sh | 12 +++ environment_macos.sh | 12 +++ environment_windows.bat | 20 +++++ install.bat | 115 --------------------------- micromamba-cmd.bat | 16 ---- start-webui.bat | 19 ----- start_linux.sh | 59 ++++++++++++++ start_macos.sh | 60 ++++++++++++++ start_windows.bat | 55 +++++++++++++ update_linux.sh | 19 +++++ update_macos.sh | 19 +++++ update_windows.bat | 23 ++++++ webui.py | 171 ++++++++++++++++++++++++++++++++++++++++ 15 files changed, 450 insertions(+), 195 deletions(-) delete mode 100644 INSTRUCTIONS.txt delete mode 100644 download-model.bat create mode 100644 environment_linux.sh create mode 100644 environment_macos.sh create mode 100644 environment_windows.bat delete mode 100644 install.bat delete mode 100644 micromamba-cmd.bat delete mode 100644 start-webui.bat create mode 100644 start_linux.sh create mode 100644 start_macos.sh create mode 100644 start_windows.bat create mode 100644 update_linux.sh create mode 100644 update_macos.sh create mode 100644 update_windows.bat create mode 100644 webui.py diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt deleted file mode 100644 index cb655f60..00000000 --- a/INSTRUCTIONS.txt +++ /dev/null @@ -1,19 +0,0 @@ -Thank you for downloading oobabooga/text-generation-webui. -Here is how to get it up and running: - -1. Run the "install" script to install the web UI and its requirements in this folder. -2. Run the "download-model" script to download a model of your choice. Change TextOnly variable at top of script to download only config files. -3. Run the "start-webui" script to launch the web UI. - -To add flags like --chat, --notebook, --extensions, etc, edit the -"start-webui" script using a text editor and add the desired flags -to the line that says "python server.py...". - -To get the latest updates in the future, just re-run the "install" script. -This will only install the updates, so it should be much faster. -May need to delete '\text-generation-webui\repositories\GPTQ-for-LLaMa' folder if GPTQ-for-LLaMa needs to be updated. 
- -You can open a command-line attached to the virtual environment by running the "micromamba-cmd" script. - -This installer uses a custom-built Windows-compatible version of bitsandbytes. Source: https://github.com/acpopescu/bitsandbytes/tree/cmake_windows -When starting the webui, you may encounter an error referencing cuda 116. Starting the webui again should allow bitsandbytes to detect the correct version. \ No newline at end of file diff --git a/download-model.bat b/download-model.bat deleted file mode 100644 index af2610c6..00000000 --- a/download-model.bat +++ /dev/null @@ -1,26 +0,0 @@ -@echo off - -SET TextOnly=False &REM True or False for Text only mode - -cd /D "%~dp0" - -set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba -set INSTALL_ENV_DIR=%cd%\installer_files\env - -if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 -) -call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) - -cd text-generation-webui || goto end -goto %TextOnly% - -:False -call python download-model.py -goto end - -:True -call python download-model.py --text-only - -:end -pause diff --git a/environment_linux.sh b/environment_linux.sh new file mode 100644 index 00000000..08717db5 --- /dev/null +++ b/environment_linux.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# activate env +bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") diff --git a/environment_macos.sh b/environment_macos.sh new file mode 100644 index 00000000..08717db5 --- /dev/null +++ b/environment_macos.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# activate env +bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") diff --git a/environment_windows.bat b/environment_windows.bat new file mode 100644 index 00000000..e7463ebb --- /dev/null +++ b/environment_windows.bat @@ -0,0 +1,20 @@ +@echo off + +cd /D "%~dp0" + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem config +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem enter commands +cmd /k "%*" + +:end +pause diff --git a/install.bat b/install.bat deleted file mode 100644 index 8f904889..00000000 --- a/install.bat +++ /dev/null @@ -1,115 +0,0 @@ -@echo off - -@rem Based on the installer found here: https://github.com/Sygil-Dev/sygil-webui -@rem This script will install git and all dependencies -@rem using micromamba (an 8mb static-linked single-file binary, conda replacement). 
-@rem This enables a user to install this project without manually installing conda and git. - -echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces. -echo May also have issues with long paths.&& echo. - -pause -cls - -echo What is your GPU? -echo. -echo A) NVIDIA -echo B) None (I want to run in CPU mode) -echo. -set /p "gpuchoice=Input> " -set gpuchoice=%gpuchoice:~0,1% - -if /I "%gpuchoice%" == "A" ( - set "PACKAGES_TO_INSTALL=python=3.10.9 pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git" - set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia -c conda-forge" -) else if /I "%gpuchoice%" == "B" ( - set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly git" - set "CHANNEL=-c conda-forge -c pytorch" -) else ( - echo Invalid choice. Exiting... - exit -) - -cd /D "%~dp0" - -set PATH=%PATH%;%SystemRoot%\system32 - -set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba -set INSTALL_ENV_DIR=%cd%\installer_files\env -set MICROMAMBA_DOWNLOAD_URL=https://github.com/mamba-org/micromamba-releases/releases/download/1.4.0-0/micromamba-win-64 -set REPO_URL=https://github.com/oobabooga/text-generation-webui.git -set umamba_exists=F - -@rem figure out whether git and conda needs to be installed -call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >nul 2>&1 -if "%ERRORLEVEL%" EQU "0" set umamba_exists=T - -@rem (if necessary) install git and conda into a contained environment -if "%PACKAGES_TO_INSTALL%" NEQ "" ( - @rem download micromamba - if "%umamba_exists%" == "F" ( - echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe" - - mkdir "%MAMBA_ROOT_PREFIX%" - call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo. && echo Micromamba failed to download. && goto end ) - - @rem test the mamba binary - echo Micromamba version: - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version || ( echo. && echo Micromamba not found. && goto end ) - ) - - @rem create micromamba hook - if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 - ) - - @rem create the installer env - if not exist "%INSTALL_ENV_DIR%" ( - echo Packages to install: %PACKAGES_TO_INSTALL% - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" %CHANNEL% %PACKAGES_TO_INSTALL% || ( echo. && echo Conda environment creation failed. && goto end ) - ) -) - -@rem check if conda environment was actually created -if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end ) - -@rem activate installer env -call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo MicroMamba hook not found. 
&& goto end ) - -@rem clone the repository and install the pip requirements -if exist text-generation-webui\ ( - cd text-generation-webui - git pull -) else ( - git clone https://github.com/oobabooga/text-generation-webui.git - call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl - cd text-generation-webui || goto end -) -call python -m pip install -r requirements.txt --upgrade -call python -m pip install -r extensions\api\requirements.txt --upgrade -call python -m pip install -r extensions\elevenlabs_tts\requirements.txt --upgrade -call python -m pip install -r extensions\google_translate\requirements.txt --upgrade -call python -m pip install -r extensions\silero_tts\requirements.txt --upgrade -call python -m pip install -r extensions\whisper_stt\requirements.txt --upgrade - -@rem skip gptq install if cpu only -if /I not "%gpuchoice%" == "A" goto bandaid - -@rem download gptq and compile locally and if compile fails, install from wheel -if not exist repositories\ ( - mkdir repositories -) -cd repositories || goto end -if not exist GPTQ-for-LLaMa\ ( - git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda - cd GPTQ-for-LLaMa || goto end - call python -m pip install -r requirements.txt - call python setup_cuda.py install - if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" ( - echo CUDA kernal compilation failed. Will try to install from wheel. - call python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo. && echo Wheel installation failed. && goto end ) - ) -) - -:end -pause diff --git a/micromamba-cmd.bat b/micromamba-cmd.bat deleted file mode 100644 index 355e7b43..00000000 --- a/micromamba-cmd.bat +++ /dev/null @@ -1,16 +0,0 @@ -@echo off - -cd /D "%~dp0" - -set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba -set INSTALL_ENV_DIR=%cd%\installer_files\env - -if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 -) -call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) - -cmd /k "%*" - -:end -pause diff --git a/start-webui.bat b/start-webui.bat deleted file mode 100644 index 23b5b8c3..00000000 --- a/start-webui.bat +++ /dev/null @@ -1,19 +0,0 @@ -@echo off - -@echo Starting the web UI... - -cd /D "%~dp0" - -set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba -set INSTALL_ENV_DIR=%cd%\installer_files\env - -if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" ( - call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1 -) -call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end ) -cd text-generation-webui - -call python server.py --auto-devices --chat - -:end -pause diff --git a/start_linux.sh b/start_linux.sh new file mode 100644 index 00000000..8d3ecfc5 --- /dev/null +++ b/start_linux.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +OS_ARCH=$(uname -m) +case "${OS_ARCH}" in + x86_64*) OS_ARCH="x86_64";; + arm64*) OS_ARCH="aarch64";; + *) echo "Unknown system architecture: $OS_ARCH! 
This script runs only on x86_64 or arm64" && exit +esac + +# config +INSTALL_DIR="$(pwd)/installer_files" +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Linux-${OS_ARCH}.sh" +conda_exists="F" + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + mkdir -p "$INSTALL_DIR" + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." + exit +fi + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env +python webui.py + +echo +echo "Done!" diff --git a/start_macos.sh b/start_macos.sh new file mode 100644 index 00000000..5686ce22 --- /dev/null +++ b/start_macos.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# M Series or Intel +OS_ARCH=$(uname -m) +case "${OS_ARCH}" in + x86_64*) OS_ARCH="x86_64";; + arm64*) OS_ARCH="arm64";; + *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit +esac + +# config +INSTALL_DIR="$(pwd)/installer_files" +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-MacOSX-${OS_ARCH}.sh" +conda_exists="F" + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + mkdir -p "$INSTALL_DIR" + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." 
+ exit +fi + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env +python webui.py + +echo +echo "Done!" diff --git a/start_windows.bat b/start_windows.bat new file mode 100644 index 00000000..d647fd20 --- /dev/null +++ b/start_windows.bat @@ -0,0 +1,55 @@ +@echo off + +cd /D "%~dp0" + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem config +set INSTALL_DIR=%cd%\installer_files +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env +set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Windows-x86_64.exe +set conda_exists=F + +@rem figure out whether git and conda needs to be installed +call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1 +if "%ERRORLEVEL%" EQU "0" set conda_exists=T + +@rem (if necessary) install git and conda into a contained environment +@rem download conda +if "%conda_exists%" == "F" ( + echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe + + mkdir "%INSTALL_DIR%" + call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end ) + + echo Installing Miniconda to %CONDA_ROOT_PREFIX% + start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX% + + @rem test the conda binary + echo Miniconda version: + call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end ) +) + +@rem create the installer env +if not exist "%INSTALL_ENV_DIR%" ( + echo Packages to install: %PACKAGES_TO_INSTALL% + call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end ) +) + +@rem check if conda environment was actually created +if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end ) + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem setup installer env +call python webui.py + +echo. +echo Done! + +:end +pause diff --git a/update_linux.sh b/update_linux.sh new file mode 100644 index 00000000..5966f9d9 --- /dev/null +++ b/update_linux.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# update installer env +python webui.py --update + +echo +echo "Done!" 
diff --git a/update_macos.sh b/update_macos.sh new file mode 100644 index 00000000..5966f9d9 --- /dev/null +++ b/update_macos.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# update installer env +python webui.py --update + +echo +echo "Done!" diff --git a/update_windows.bat b/update_windows.bat new file mode 100644 index 00000000..73241a1d --- /dev/null +++ b/update_windows.bat @@ -0,0 +1,23 @@ +@echo off + +cd /D "%~dp0" + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem config +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem update installer env +call python webui.py --update + +echo. +echo Done! + +:end +pause diff --git a/webui.py b/webui.py new file mode 100644 index 00000000..b118f80b --- /dev/null +++ b/webui.py @@ -0,0 +1,171 @@ +import argparse +import glob +import os +import shutil +import site +import subprocess +import sys + +script_dir = os.getcwd() + + +def run_cmd(cmd, capture_output=False, env=None): + # Run shell commands + return subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) + + +def check_env(): + # If we have access to conda, we are probably in an environment + conda_not_exist = run_cmd("conda", capture_output=True).returncode + if conda_not_exist: + print("Conda is not installed. Exiting...") + sys.exit() + + # Ensure this is a new environment and not the base environment + if os.environ["CONDA_DEFAULT_ENV"] == "base": + print("Create an environment for this project and activate it. Exiting...") + sys.exit() + + +def install_dependencies(): + # Select your GPU or, choose to run in CPU mode + print("What is your GPU") + print() + print("A) NVIDIA") + print("B) AMD") + print("C) Apple M Series") + print("D) None (I want to run in CPU mode)") + print() + gpuchoice = input("Input> ").lower() + + # Install the version of PyTorch needed + if gpuchoice == "a": + run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia") + elif gpuchoice == "b": + print("AMD GPUs are not supported. Exiting...") + sys.exit() + elif gpuchoice == "c" or gpuchoice == "d": + run_cmd("conda install -y -k pytorch torchvision torchaudio cpuonly git -c pytorch") + else: + print("Invalid choice. 
Exiting...") + sys.exit() + + # Clone webui to our computer + run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git") + if sys.platform.startswith("win"): + # Fix a bitsandbytes compatibility issue with Windows + run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl") + + # Install the webui dependencies + update_dependencies() + + +def update_dependencies(): + os.chdir("text-generation-webui") + run_cmd("git pull") + + # Installs/Updates dependencies from all requirements.txt + run_cmd("python -m pip install -r requirements.txt --upgrade") + extensions = next(os.walk("extensions"))[1] + for extension in extensions: + extension_req_path = os.path.join("extensions", extension, "requirements.txt") + if os.path.exists(extension_req_path): + run_cmd("python -m pip install -r " + extension_req_path + " --upgrade") + + # The following dependencies are for CUDA, not CPU + # Check if the package cpuonly exists to determine if torch uses CUDA or not + cpuonly_exist = not run_cmd("conda list cpuonly | grep cpuonly", capture_output=True).returncode + if cpuonly_exist: + return + + # Finds the path to your dependencies + for sitedir in site.getsitepackages(): + if "site-packages" in sitedir: + site_packages_path = sitedir + break + + # This path is critical to installing the following dependencies + if site_packages_path is None: + print("Could not find the path to your Python packages. Exiting...") + sys.exit() + + # Fix a bitsandbytes compatibility issue with Linux + if sys.platform.startswith("linux"): + shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so")) + + if not os.path.exists("repositories/"): + os.mkdir("repositories") + + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization + os.chdir("repositories") + if not os.path.exists("GPTQ-for-LLaMa/"): + run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda") + + # Install GPTQ-for-LLaMa dependencies + os.chdir("GPTQ-for-LLaMa") + run_cmd("git pull") + run_cmd("python -m pip install -r requirements.txt") + + # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa + install_flag = True + if sys.platform.startswith("linux"): + gxx_output = run_cmd("g++ --version", capture_output=True) + if gxx_output.returncode != 0 or b"g++ (GCC) 12" in gxx_output.stdout: + # Install the correct version of g++ + run_cmd("conda install -y -k gxx_linux-64=11.2.0") + + # Activate the conda environment to compile GPTQ-for-LLaMa + conda_env_path = os.path.join(script_dir, "installer_files", "env") + conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") + run_cmd(". 
" + conda_sh_path + " && conda activate " + conda_env_path + " && python setup_cuda.py install") + install_flag = False + + if install_flag: + run_cmd("python setup_cuda.py install") + install_flag = False + + # If the path does not exist, then the install failed + quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") + if not glob.glob(quant_cuda_path_regex): + print("CUDA kernel compilation failed.") + # Attempt installation via alternative, Windows-specific method + if sys.platform.startswith("win"): + print("Attempting installation with wheel.") + result = run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl") + if result.returncode == 1: + print("Wheel installation failed.") + + +def download_model(): + os.chdir("text-generation-webui") + run_cmd("python download-model.py") + + +def run_model(): + os.chdir("text-generation-webui") + run_cmd("python server.py --auto-devices --chat --model-menu") + + +if __name__ == "__main__": + # Verifies we are in a conda environment + check_env() + + parser = argparse.ArgumentParser() + parser.add_argument('--update', action='store_true', help='Update the web UI.') + args = parser.parse_args() + + if args.update: + update_dependencies() + else: + # If webui has already been installed, skip and run + if not os.path.exists("text-generation-webui/"): + install_dependencies() + os.chdir(script_dir) + + # Check if a model has been downloaded yet + if len(glob.glob("text-generation-webui/models/*/")) == 0: + download_model() + os.chdir(script_dir) + + # Run the model with webui + run_model() From 647f7bca36f33ad10d5021955530dcdeba360270 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 18 Apr 2023 02:29:55 -0300 Subject: [PATCH 043/133] Rename environment_linux.sh to cmd_linux.sh --- environment_linux.sh => cmd_linux.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename environment_linux.sh => cmd_linux.sh (100%) diff --git a/environment_linux.sh b/cmd_linux.sh similarity index 100% rename from environment_linux.sh rename to cmd_linux.sh From 316aaff348d0e2cf135e81adddee6c81f32b972f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 18 Apr 2023 02:30:08 -0300 Subject: [PATCH 044/133] Rename environment_macos.sh to cmd_macos.sh --- environment_macos.sh => cmd_macos.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename environment_macos.sh => cmd_macos.sh (100%) diff --git a/environment_macos.sh b/cmd_macos.sh similarity index 100% rename from environment_macos.sh rename to cmd_macos.sh From a5f7d98cf32b369be3b7af600601bd0ac04d4215 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 18 Apr 2023 02:30:23 -0300 Subject: [PATCH 045/133] Rename environment_windows.bat to cmd_windows.bat --- environment_windows.bat => cmd_windows.bat | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename environment_windows.bat => cmd_windows.bat (100%) diff --git a/environment_windows.bat b/cmd_windows.bat similarity index 100% rename from environment_windows.bat rename to cmd_windows.bat From 1ba0082410e6a364a4fd09badfa1666fec213573 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 18 Apr 2023 02:30:47 -0300 Subject: [PATCH 046/133] Add files via upload --- INSTRUCTIONS.TXT | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 
INSTRUCTIONS.TXT diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT new file mode 100644 index 00000000..9e202d14 --- /dev/null +++ b/INSTRUCTIONS.TXT @@ -0,0 +1,30 @@ +Thank you for downloading oobabooga/text-generation-webui. + +# Installation + +Simply run the "start" script. + +# Updating + +Run the "update" script. This will only install the updates, so it should +be much faster than the initial installation. + +May need to delete the 'text-generation-webui\repositories\GPTQ-for-LLaMa' +folder if GPTQ-for-LLaMa needs to be updated. + +# Adding flags like --chat, --notebook, etc + +Edit the "start" script using a text editor and add the desired flags +to the line that says + +run_cmd("python server.py --auto-devices --chat --model-menu") + +For instance, to add the --notebook flag, change it to + +run_cmd("python server.py --auto-devices --notebook --model-menu") + +# Running an interactive shell + +In order to run an interactive shell in the miniconda environment, run +the "cmd" script. This is useful for installing additional requirements +manually. From dfbb18610f4735b3dca8be5118709b02e9ae4efc Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 23 Apr 2023 12:58:14 -0300 Subject: [PATCH 047/133] Update INSTRUCTIONS.TXT --- INSTRUCTIONS.TXT | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index 9e202d14..504b1473 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -14,7 +14,7 @@ folder if GPTQ-for-LLaMa needs to be updated. # Adding flags like --chat, --notebook, etc -Edit the "start" script using a text editor and add the desired flags +Edit the "webui.py" script using a text editor and add the desired flags to the line that says run_cmd("python server.py --auto-devices --chat --model-menu") From 9a8487097bdb886adf398775ea4b3e42565d319f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:43:52 -0300 Subject: [PATCH 048/133] Remove --auto-devices --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index b118f80b..136045f9 100644 --- a/webui.py +++ b/webui.py @@ -143,7 +143,7 @@ def download_model(): def run_model(): os.chdir("text-generation-webui") - run_cmd("python server.py --auto-devices --chat --model-menu") + run_cmd("python server.py --chat --model-menu") if __name__ == "__main__": From a4f6724b881d530131eb5e6bbf43b825efcb3f24 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:47:22 -0300 Subject: [PATCH 049/133] Add a comment --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 136045f9..8ab51cc7 100644 --- a/webui.py +++ b/webui.py @@ -143,7 +143,7 @@ def download_model(): def run_model(): os.chdir("text-generation-webui") - run_cmd("python server.py --chat --model-menu") + run_cmd("python server.py --chat --model-menu") # put your flags here! if __name__ == "__main__": From d66059d95a5dd43ced706476671843a057984ce8 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:50:03 -0300 Subject: [PATCH 050/133] Update INSTRUCTIONS.TXT --- INSTRUCTIONS.TXT | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index 504b1473..0eda45e4 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -17,11 +17,11 @@ folder if GPTQ-for-LLaMa needs to be updated. 
Edit the "webui.py" script using a text editor and add the desired flags to the line that says -run_cmd("python server.py --auto-devices --chat --model-menu") +run_cmd("python server.py --chat --model-menu") For instance, to add the --notebook flag, change it to -run_cmd("python server.py --auto-devices --notebook --model-menu") +run_cmd("python server.py --notebook --model-menu") # Running an interactive shell From bcd5786a479c5a5540096dde8a47767cacdf71a0 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 24 Apr 2023 16:53:04 -0300 Subject: [PATCH 051/133] Add files via upload --- generate_zips.sh | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 generate_zips.sh diff --git a/generate_zips.sh b/generate_zips.sh new file mode 100644 index 00000000..1bfe7247 --- /dev/null +++ b/generate_zips.sh @@ -0,0 +1,5 @@ +mkdir oobabooga_{windows,linux,macos} +for p in windows macos linux; do + cp {*$p*\.*,webui.py,INSTRUCTIONS.TXT} oobabooga_$p; + zip -r oobabooga_$p.zip oobabooga_$p; +done From 4babb22f846e74f096af5f487a2b4a6942b3f3c3 Mon Sep 17 00:00:00 2001 From: Blake Wyatt <894305+xNul@users.noreply.github.com> Date: Tue, 2 May 2023 11:28:20 -0400 Subject: [PATCH 052/133] Fix/Improve a bunch of things (#42) --- cmd_windows.bat | 4 +++ start_windows.bat | 4 +++ update_windows.bat | 4 +++ webui.py | 83 ++++++++++++++++++++++++++++------------------ 4 files changed, 62 insertions(+), 33 deletions(-) diff --git a/cmd_windows.bat b/cmd_windows.bat index e7463ebb..0dc1bb8f 100644 --- a/cmd_windows.bat +++ b/cmd_windows.bat @@ -6,6 +6,10 @@ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which c set PATH=%PATH%;%SystemRoot%\system32 +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env diff --git a/start_windows.bat b/start_windows.bat index d647fd20..99608eba 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -6,6 +6,10 @@ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which c set PATH=%PATH%;%SystemRoot%\system32 +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + @rem config set INSTALL_DIR=%cd%\installer_files set CONDA_ROOT_PREFIX=%cd%\installer_files\conda diff --git a/update_windows.bat b/update_windows.bat index 73241a1d..7d1c7eb1 100644 --- a/update_windows.bat +++ b/update_windows.bat @@ -6,6 +6,10 @@ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which c set PATH=%PATH%;%SystemRoot%\system32 +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env diff --git a/webui.py b/webui.py index 8ab51cc7..09058623 100644 --- a/webui.py +++ b/webui.py @@ -9,15 +9,31 @@ import sys script_dir = os.getcwd() -def run_cmd(cmd, capture_output=False, env=None): +def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): + # Use the conda environment + if environment: + conda_env_path = os.path.join(script_dir, "installer_files", "env") + if sys.platform.startswith("win"): + conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") + cmd = "\"" + conda_bat_path + "\" activate \"" + 
conda_env_path + "\" >nul && " + cmd + else: + conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") + cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd + # Run shell commands - return subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) + result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) + + # Assert the command ran successfully + if assert_success and result.returncode != 0: + print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...") + sys.exit() + return result def check_env(): # If we have access to conda, we are probably in an environment - conda_not_exist = run_cmd("conda", capture_output=True).returncode - if conda_not_exist: + conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0 + if not conda_exist: print("Conda is not installed. Exiting...") sys.exit() @@ -40,21 +56,21 @@ def install_dependencies(): # Install the version of PyTorch needed if gpuchoice == "a": - run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia") + run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia", assert_success=True, environment=True) elif gpuchoice == "b": print("AMD GPUs are not supported. Exiting...") sys.exit() elif gpuchoice == "c" or gpuchoice == "d": - run_cmd("conda install -y -k pytorch torchvision torchaudio cpuonly git -c pytorch") + run_cmd("conda install -y -k pytorch torchvision torchaudio cpuonly git -c pytorch", assert_success=True, environment=True) else: print("Invalid choice. 
Exiting...") sys.exit() # Clone webui to our computer - run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git") + run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) if sys.platform.startswith("win"): # Fix a bitsandbytes compatibility issue with Windows - run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl") + run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True) # Install the webui dependencies update_dependencies() @@ -62,19 +78,19 @@ def install_dependencies(): def update_dependencies(): os.chdir("text-generation-webui") - run_cmd("git pull") + run_cmd("git pull", assert_success=True, environment=True) # Installs/Updates dependencies from all requirements.txt - run_cmd("python -m pip install -r requirements.txt --upgrade") + run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) extensions = next(os.walk("extensions"))[1] for extension in extensions: extension_req_path = os.path.join("extensions", extension, "requirements.txt") if os.path.exists(extension_req_path): - run_cmd("python -m pip install -r " + extension_req_path + " --upgrade") + run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) # The following dependencies are for CUDA, not CPU # Check if the package cpuonly exists to determine if torch uses CUDA or not - cpuonly_exist = not run_cmd("conda list cpuonly | grep cpuonly", capture_output=True).returncode + cpuonly_exist = run_cmd("conda list cpuonly | grep cpuonly", environment=True, capture_output=True).returncode == 0 if cpuonly_exist: return @@ -99,51 +115,52 @@ def update_dependencies(): # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization os.chdir("repositories") if not os.path.exists("GPTQ-for-LLaMa/"): - run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda") + run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) # Install GPTQ-for-LLaMa dependencies os.chdir("GPTQ-for-LLaMa") - run_cmd("git pull") - run_cmd("python -m pip install -r requirements.txt") + run_cmd("git pull", assert_success=True, environment=True) + run_cmd("python -m pip install -r requirements.txt", assert_success=True, environment=True) # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa - install_flag = True if sys.platform.startswith("linux"): - gxx_output = run_cmd("g++ --version", capture_output=True) + gxx_output = run_cmd("g++ --version", environment=True, capture_output=True) if gxx_output.returncode != 0 or b"g++ (GCC) 12" in gxx_output.stdout: # Install the correct version of g++ - run_cmd("conda install -y -k gxx_linux-64=11.2.0") - - # Activate the conda environment to compile GPTQ-for-LLaMa - conda_env_path = os.path.join(script_dir, "installer_files", "env") - conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") - run_cmd(". 
" + conda_sh_path + " && conda activate " + conda_env_path + " && python setup_cuda.py install") - install_flag = False + run_cmd("conda install -y -k gxx_linux-64=11.2.0", environment=True) - if install_flag: - run_cmd("python setup_cuda.py install") - install_flag = False + # Compile and install GPTQ-for-LLaMa + os.rename("setup_cuda.py", "setup.py") + run_cmd("python -m pip install .", environment=True) + + # Wheel installation can fail while in the build directory of a package with the same name + os.chdir("..") # If the path does not exist, then the install failed quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") if not glob.glob(quant_cuda_path_regex): - print("CUDA kernel compilation failed.") + print("ERROR: GPTQ CUDA kernel compilation failed.") # Attempt installation via alternative, Windows-specific method if sys.platform.startswith("win"): print("Attempting installation with wheel.") - result = run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl") - if result.returncode == 1: - print("Wheel installation failed.") + result = run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl", environment=True) + if result.returncode == 0: + print("Wheel installation success!") + else: + print("ERROR: GPTQ wheel installation failed. You will not be able to use GPTQ-based models.") + else: + print("You will not be able to use GPTQ-based models.") + print("Continuing with install..") def download_model(): os.chdir("text-generation-webui") - run_cmd("python download-model.py") + run_cmd("python download-model.py", environment=True) def run_model(): os.chdir("text-generation-webui") - run_cmd("python server.py --chat --model-menu") # put your flags here! + run_cmd("python server.py --chat --model-menu", environment=True) # put your flags here! if __name__ == "__main__": From 24c5ba2b9c2b5b137f27d9e1d5c69cf863c2d850 Mon Sep 17 00:00:00 2001 From: Semih Aslan <110099203+andmydignity@users.noreply.github.com> Date: Wed, 3 May 2023 02:47:03 +0000 Subject: [PATCH 053/133] Fixed error when $OS_ARCH returns aarch64 (#45) For some machines $OS_ARCH returns aarch64 instead of ARM64,and as i see here this should fix it. --- start_linux.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/start_linux.sh b/start_linux.sh index 8d3ecfc5..5359179c 100644 --- a/start_linux.sh +++ b/start_linux.sh @@ -8,6 +8,7 @@ OS_ARCH=$(uname -m) case "${OS_ARCH}" in x86_64*) OS_ARCH="x86_64";; arm64*) OS_ARCH="aarch64";; + aarch64*) OS_ARCH="aarch64";; *) echo "Unknown system architecture: $OS_ARCH! 
This script runs only on x86_64 or arm64" && exit esac From dec31af910713edcc268e7247b4ac5dd4e1421a6 Mon Sep 17 00:00:00 2001 From: Roberts Slisans Date: Wed, 3 May 2023 05:47:19 +0300 Subject: [PATCH 054/133] Create .gitignore (#43) --- .gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..31a2b2dc --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +installer_files/ From 126d216384b65e28936a432a7a5514907f0c5b92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Semjon=20Kravt=C5=A1enko?= Date: Sat, 6 May 2023 07:14:09 +0300 Subject: [PATCH 055/133] Fix possible crash (#53) --- webui.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 09058623..a94dfdd3 100644 --- a/webui.py +++ b/webui.py @@ -130,7 +130,8 @@ def update_dependencies(): run_cmd("conda install -y -k gxx_linux-64=11.2.0", environment=True) # Compile and install GPTQ-for-LLaMa - os.rename("setup_cuda.py", "setup.py") + if os.path.exists('setup_cuda.py'): + os.rename("setup_cuda.py", "setup.py") run_cmd("python -m pip install .", environment=True) # Wheel installation can fail while in the build directory of a package with the same name From 29727c6502701b960c2c299fa63f502b3ef3476c Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Mon, 8 May 2023 23:49:27 -0500 Subject: [PATCH 056/133] Fix Windows PATH fix (#57) --- cmd_windows.bat | 4 ++-- start_windows.bat | 4 ++-- update_windows.bat | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd_windows.bat b/cmd_windows.bat index 0dc1bb8f..4a5f4f0e 100644 --- a/cmd_windows.bat +++ b/cmd_windows.bat @@ -2,10 +2,10 @@ cd /D "%~dp0" -echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end - set PATH=%PATH%;%SystemRoot%\system32 +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + @rem fix failed install when installing to a separate drive set TMP=%cd%\installer_files set TEMP=%cd%\installer_files diff --git a/start_windows.bat b/start_windows.bat index 99608eba..9af086e6 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -2,10 +2,10 @@ cd /D "%~dp0" -echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end - set PATH=%PATH%;%SystemRoot%\system32 +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + @rem fix failed install when installing to a separate drive set TMP=%cd%\installer_files set TEMP=%cd%\installer_files diff --git a/update_windows.bat b/update_windows.bat index 7d1c7eb1..4701ab48 100644 --- a/update_windows.bat +++ b/update_windows.bat @@ -2,10 +2,10 @@ cd /D "%~dp0" -echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end - set PATH=%PATH%;%SystemRoot%\system32 +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. 
&& goto end + @rem fix failed install when installing to a separate drive set TMP=%cd%\installer_files set TEMP=%cd%\installer_files From b8cfc20e58d72489f09fc937cd837be842ad43d6 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 9 May 2023 14:17:08 -0300 Subject: [PATCH 057/133] Don't install superbooga by default --- webui.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/webui.py b/webui.py index a94dfdd3..9becbf51 100644 --- a/webui.py +++ b/webui.py @@ -84,6 +84,9 @@ def update_dependencies(): run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) extensions = next(os.walk("extensions"))[1] for extension in extensions: + if extension in ['superbooga']: # No wheels available for dependencies + continue + extension_req_path = os.path.join("extensions", extension, "requirements.txt") if os.path.exists(extension_req_path): run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) From d7d3f7f31cbabefc5283edfe591717dad9eba6dd Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 10 May 2023 17:54:12 -0300 Subject: [PATCH 058/133] Add a "CMD_FLAGS" variable --- webui.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 9becbf51..d9d05ce0 100644 --- a/webui.py +++ b/webui.py @@ -8,6 +8,9 @@ import sys script_dir = os.getcwd() +# Use this to set your command-line flags. For the full list, see: +# https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui +CMD_FLAGS = '--chat --model-menu' def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): # Use the conda environment @@ -164,7 +167,7 @@ def download_model(): def run_model(): os.chdir("text-generation-webui") - run_cmd("python server.py --chat --model-menu", environment=True) # put your flags here! + run_cmd(f"python server.py {CMD_FLAGS}", environment=True) # put your flags here! if __name__ == "__main__": From 4ab5deeea01c9aa0f0a8e1eb027e715b67354ef0 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 10 May 2023 18:00:37 -0300 Subject: [PATCH 059/133] Update INSTRUCTIONS.TXT --- INSTRUCTIONS.TXT | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index 0eda45e4..9cd06c29 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -2,7 +2,11 @@ Thank you for downloading oobabooga/text-generation-webui. # Installation -Simply run the "start" script. +Run the "start" script. If all goes right, it should take care of +everything for you. + +To launch the web UI in the future after it is already installed, run +this same "start" script. # Updating @@ -15,16 +19,15 @@ folder if GPTQ-for-LLaMa needs to be updated. # Adding flags like --chat, --notebook, etc Edit the "webui.py" script using a text editor and add the desired flags -to the line that says +to the CMD_FLAGS variable at the top. It should look like this: -run_cmd("python server.py --chat --model-menu") +CMD_FLAGS = '--chat --model-menu' For instance, to add the --notebook flag, change it to -run_cmd("python server.py --notebook --model-menu") +CMD_FLAGS = '--notebook --model-menu' # Running an interactive shell -In order to run an interactive shell in the miniconda environment, run -the "cmd" script. This is useful for installing additional requirements -manually. 
+To run an interactive shell in the miniconda environment, run the "cmd" +script. This is useful for installing additional requirements manually. From 3e19733d35ca8bcdf0d482f72f622f8fd435e6bd Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 10 May 2023 18:01:04 -0300 Subject: [PATCH 060/133] Remove obsolete comment --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index d9d05ce0..e34feb9e 100644 --- a/webui.py +++ b/webui.py @@ -167,7 +167,7 @@ def download_model(): def run_model(): os.chdir("text-generation-webui") - run_cmd(f"python server.py {CMD_FLAGS}", environment=True) # put your flags here! + run_cmd(f"python server.py {CMD_FLAGS}", environment=True) if __name__ == "__main__": From 1309cdd25737417afcad2d972c72bbb66859e4c3 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 10 May 2023 18:03:12 -0300 Subject: [PATCH 061/133] Add a space --- webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/webui.py b/webui.py index e34feb9e..574de296 100644 --- a/webui.py +++ b/webui.py @@ -12,6 +12,7 @@ script_dir = os.getcwd() # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui CMD_FLAGS = '--chat --model-menu' + def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): # Use the conda environment if environment: From 0bcd5b689402d7cda7f8ef7ab7a036e483e1886a Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 18 May 2023 10:56:49 -0300 Subject: [PATCH 062/133] Soothe anxious users --- webui.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 574de296..d268d0cc 100644 --- a/webui.py +++ b/webui.py @@ -139,6 +139,7 @@ def update_dependencies(): # Compile and install GPTQ-for-LLaMa if os.path.exists('setup_cuda.py'): os.rename("setup_cuda.py", "setup.py") + run_cmd("python -m pip install .", environment=True) # Wheel installation can fail while in the build directory of a package with the same name @@ -147,17 +148,22 @@ def update_dependencies(): # If the path does not exist, then the install failed quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") if not glob.glob(quant_cuda_path_regex): - print("ERROR: GPTQ CUDA kernel compilation failed.") # Attempt installation via alternative, Windows-specific method if sys.platform.startswith("win"): - print("Attempting installation with wheel.") + print("\n\n*******************************************************************") + print("* WARNING: GPTQ-for-LLaMa compilation failed, but this FINE and can be ignored!") + print("* The installer will proceed to install a pre-compiled wheel.") + print("*******************************************************************\n\n") + result = run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl", environment=True) if result.returncode == 0: print("Wheel installation success!") else: print("ERROR: GPTQ wheel installation failed. 
You will not be able to use GPTQ-based models.") else: + print("ERROR: GPTQ CUDA kernel compilation failed.") print("You will not be able to use GPTQ-based models.") + print("Continuing with install..") From 07510a24149cbd6fd33df0c4a440d60b9783a18e Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 18 May 2023 10:58:37 -0300 Subject: [PATCH 063/133] Change a message --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index d268d0cc..ddd95e0c 100644 --- a/webui.py +++ b/webui.py @@ -151,7 +151,7 @@ def update_dependencies(): # Attempt installation via alternative, Windows-specific method if sys.platform.startswith("win"): print("\n\n*******************************************************************") - print("* WARNING: GPTQ-for-LLaMa compilation failed, but this FINE and can be ignored!") + print("* WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!") print("* The installer will proceed to install a pre-compiled wheel.") print("*******************************************************************\n\n") From 4ef2de348695ccdb616ad5a7d9b3540e59de9197 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 18 May 2023 10:46:04 -0500 Subject: [PATCH 064/133] Fix dependencies downgrading from gptq install (#61) --- webui.py | 1 - 1 file changed, 1 deletion(-) diff --git a/webui.py b/webui.py index ddd95e0c..eac630fc 100644 --- a/webui.py +++ b/webui.py @@ -127,7 +127,6 @@ def update_dependencies(): # Install GPTQ-for-LLaMa dependencies os.chdir("GPTQ-for-LLaMa") run_cmd("git pull", assert_success=True, environment=True) - run_cmd("python -m pip install -r requirements.txt", assert_success=True, environment=True) # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa if sys.platform.startswith("linux"): From 996c49daa75abf3b485e1b6b3074b117084c9e3e Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 25 May 2023 10:50:20 -0300 Subject: [PATCH 065/133] Remove bitsandbytes installation step Following https://github.com/oobabooga/text-generation-webui/commit/548f05e106ec41aa58adc6bcb1ff88116c0750c4 --- webui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index eac630fc..faf0452d 100644 --- a/webui.py +++ b/webui.py @@ -72,9 +72,9 @@ def install_dependencies(): # Clone webui to our computer run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) - if sys.platform.startswith("win"): - # Fix a bitsandbytes compatibility issue with Windows - run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True) + # if sys.platform.startswith("win"): + # # Fix a bitsandbytes compatibility issue with Windows + # run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True) # Install the webui dependencies update_dependencies() From c8ce2e777b2360d8ab5b245efeebf801358cf200 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 25 May 2023 10:57:52 -0300 Subject: [PATCH 066/133] Add instructions for CPU mode users --- webui.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/webui.py b/webui.py index 
faf0452d..70d473ac 100644 --- a/webui.py +++ b/webui.py @@ -58,6 +58,9 @@ def install_dependencies(): print() gpuchoice = input("Input> ").lower() + if gpuchoice == "d": + print("\nOnce the installation ends, make sure to open webui.py with a text editor and add the --cpu flag to CMD_FLAGS.\n") + # Install the version of PyTorch needed if gpuchoice == "a": run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia", assert_success=True, environment=True) From b1b3bb6923400acd695bf255d8dc1e4cebfcdede Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 25 May 2023 09:15:05 -0500 Subject: [PATCH 067/133] Improve environment isolation (#68) --- cmd_linux.sh | 7 +++++++ cmd_macos.sh | 7 +++++++ cmd_windows.bat | 7 +++++++ start_linux.sh | 7 +++++++ start_macos.sh | 7 +++++++ start_windows.bat | 7 +++++++ update_linux.sh | 7 +++++++ update_macos.sh | 7 +++++++ update_windows.bat | 7 +++++++ 9 files changed, 63 insertions(+) diff --git a/cmd_linux.sh b/cmd_linux.sh index 08717db5..0a4ef620 100644 --- a/cmd_linux.sh +++ b/cmd_linux.sh @@ -8,5 +8,12 @@ if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can no CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate env bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") diff --git a/cmd_macos.sh b/cmd_macos.sh index 08717db5..0a4ef620 100644 --- a/cmd_macos.sh +++ b/cmd_macos.sh @@ -8,5 +8,12 @@ if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can no CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate env bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") diff --git a/cmd_windows.bat b/cmd_windows.bat index 4a5f4f0e..606ff485 100644 --- a/cmd_windows.bat +++ b/cmd_windows.bat @@ -14,6 +14,13 @@ set TEMP=%cd%\installer_files set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + @rem activate installer env call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) diff --git a/start_linux.sh b/start_linux.sh index 5359179c..89d00c1c 100644 --- a/start_linux.sh +++ b/start_linux.sh @@ -49,6 +49,13 @@ if [ ! 
-e "$INSTALL_ENV_DIR/bin/python" ]; then exit fi +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate installer env source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" diff --git a/start_macos.sh b/start_macos.sh index 5686ce22..04ab07db 100644 --- a/start_macos.sh +++ b/start_macos.sh @@ -49,6 +49,13 @@ if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then exit fi +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate installer env source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" diff --git a/start_windows.bat b/start_windows.bat index 9af086e6..063942fe 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -46,6 +46,13 @@ if not exist "%INSTALL_ENV_DIR%" ( @rem check if conda environment was actually created if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end ) +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + @rem activate installer env call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) diff --git a/update_linux.sh b/update_linux.sh index 5966f9d9..f7be8440 100644 --- a/update_linux.sh +++ b/update_linux.sh @@ -8,6 +8,13 @@ if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can no CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate installer env source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" diff --git a/update_macos.sh b/update_macos.sh index 5966f9d9..f7be8440 100644 --- a/update_macos.sh +++ b/update_macos.sh @@ -8,6 +8,13 @@ if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can no CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + # activate installer env source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" diff --git a/update_windows.bat b/update_windows.bat index 4701ab48..a44e2188 100644 --- a/update_windows.bat +++ b/update_windows.bat @@ -14,6 +14,13 @@ set TEMP=%cd%\installer_files set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + @rem activate installer env call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. 
&& goto end ) From be98e7433743e6f42a8212a6a34f2830618c98aa Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Wed, 31 May 2023 12:41:03 -0500 Subject: [PATCH 068/133] Install older bitsandbytes on older gpus + fix llama-cpp-python issue (#75) --- webui.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 70d473ac..6f8610d6 100644 --- a/webui.py +++ b/webui.py @@ -7,6 +7,7 @@ import subprocess import sys script_dir = os.getcwd() +conda_env_path = os.path.join(script_dir, "installer_files", "env") # Use this to set your command-line flags. For the full list, see: # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui @@ -16,7 +17,6 @@ CMD_FLAGS = '--chat --model-menu' def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): # Use the conda environment if environment: - conda_env_path = os.path.join(script_dir, "installer_files", "env") if sys.platform.startswith("win"): conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd @@ -98,6 +98,20 @@ def update_dependencies(): if os.path.exists(extension_req_path): run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) + # Latest bitsandbytes requires minimum compute 7.0 + nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" + min_compute = 70 + compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) + old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl" + if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')): + old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0 + print("\n\nWARNING: GPU with compute < 7.0 detected!") + if old_bnb_install: + print("Older version of bitsandbytes has been installed to maintain compatibility.") + print("You will be unable to use --load-in-4bit!\n\n") + else: + print("You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n\n") + # The following dependencies are for CUDA, not CPU # Check if the package cpuonly exists to determine if torch uses CUDA or not cpuonly_exist = run_cmd("conda list cpuonly | grep cpuonly", environment=True, capture_output=True).returncode == 0 @@ -200,5 +214,10 @@ if __name__ == "__main__": download_model() os.chdir(script_dir) + # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist + conda_path_bin = os.path.join(conda_env_path, "bin") + if not os.path.exists(conda_path_bin): + os.mkdir(conda_path_bin) + # Run the model with webui run_model() From 540563530520bb65316ced549586c2dda8f48a00 Mon Sep 17 00:00:00 2001 From: Sam Date: Wed, 31 May 2023 13:41:54 -0400 Subject: [PATCH 069/133] Install pre-compiled wheels for Linux (#74) --- webui.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index 6f8610d6..aa6f6ab7 100644 --- a/webui.py +++ b/webui.py @@ -164,14 +164,18 @@ def update_dependencies(): # If the path does not exist, then the install failed 
quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") if not glob.glob(quant_cuda_path_regex): - # Attempt installation via alternative, Windows-specific method - if sys.platform.startswith("win"): + # Attempt installation via alternative, Windows/Linux-specific method + if sys.platform.startswith("win") or sys.platform.startswith("linux"): print("\n\n*******************************************************************") print("* WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!") print("* The installer will proceed to install a pre-compiled wheel.") print("*******************************************************************\n\n") - result = run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl", environment=True) + url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl" + if sys.platform.startswith("linux"): + url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl" + + result = run_cmd("python -m pip install " + url, environment=True) if result.returncode == 0: print("Wheel installation success!") else: From 97bc7e3fb6f52aeb61e23ec18a8cff560ed6391c Mon Sep 17 00:00:00 2001 From: gavin660 <43652996+gavin660@users.noreply.github.com> Date: Wed, 31 May 2023 10:43:22 -0700 Subject: [PATCH 070/133] Adds functionality for user to set flags via environment variable (#59) --- webui.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/webui.py b/webui.py index aa6f6ab7..4c2354d5 100644 --- a/webui.py +++ b/webui.py @@ -14,6 +14,14 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") CMD_FLAGS = '--chat --model-menu' +# Allows users to set flags in "OOBABOOGA_FLAGS" environment variable +if "OOBABOOGA_FLAGS" in os.environ: + CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] + print("\33[1;32mFlags have been taken from enivroment Variable 'OOBABOOGA_FLAGS'\33[0m") + print(CMD_FLAGS) + print("\33[1;32mTo use flags from webui.py remove 'OOBABOOGA_FLAGS'\33[0m") + + def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): # Use the conda environment if environment: From dea1bf3d04bbf36f07afaf7254aa02858e50e7e1 Mon Sep 17 00:00:00 2001 From: Sam Date: Wed, 31 May 2023 13:44:36 -0400 Subject: [PATCH 071/133] Parse g++ version instead of using string matching (#72) --- webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 4c2354d5..c0cfadc0 100644 --- a/webui.py +++ b/webui.py @@ -155,8 +155,8 @@ def update_dependencies(): # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa if sys.platform.startswith("linux"): - gxx_output = run_cmd("g++ --version", environment=True, capture_output=True) - if gxx_output.returncode != 0 or b"g++ (GCC) 12" in gxx_output.stdout: + gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) + if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: # Install the correct version of g++ run_cmd("conda install -y -k gxx_linux-64=11.2.0", environment=True) From 2e53caa80682dae2ba9e8572fc497bd544d1ef77 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 31 May 2023 16:28:36 -0300 Subject: [PATCH 072/133] Create LICENSE --- LICENSE | 661 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 661 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..0ad25db4 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. 
From 290a3374e491e6a5609ecc4ed115d5ef8caa1d12 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 1 Jun 2023 01:20:56 -0300 Subject: [PATCH 073/133] Don't download a model during installation And some other updates/minor improvements --- INSTRUCTIONS.TXT | 11 +++----- start_linux.sh | 3 --- start_macos.sh | 3 --- start_windows.bat | 3 --- webui.py | 67 +++++++++++++++++++++++++++-------------------- 5 files changed, 42 insertions(+), 45 deletions(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index 9cd06c29..eaf24ef9 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -8,24 +8,21 @@ everything for you. To launch the web UI in the future after it is already installed, run this same "start" script. -# Updating +# Updating the web UI Run the "update" script. This will only install the updates, so it should be much faster than the initial installation. -May need to delete the 'text-generation-webui\repositories\GPTQ-for-LLaMa' -folder if GPTQ-for-LLaMa needs to be updated. - # Adding flags like --chat, --notebook, etc Edit the "webui.py" script using a text editor and add the desired flags to the CMD_FLAGS variable at the top. It should look like this: -CMD_FLAGS = '--chat --model-menu' +CMD_FLAGS = '--chat' -For instance, to add the --notebook flag, change it to +For instance, to add the --api flag, change it to -CMD_FLAGS = '--notebook --model-menu' +CMD_FLAGS = '--chat --api' # Running an interactive shell diff --git a/start_linux.sh b/start_linux.sh index 89d00c1c..e699a836 100644 --- a/start_linux.sh +++ b/start_linux.sh @@ -62,6 +62,3 @@ conda activate "$INSTALL_ENV_DIR" # setup installer env python webui.py - -echo -echo "Done!" diff --git a/start_macos.sh b/start_macos.sh index 04ab07db..cd4aaa9e 100644 --- a/start_macos.sh +++ b/start_macos.sh @@ -62,6 +62,3 @@ conda activate "$INSTALL_ENV_DIR" # setup installer env python webui.py - -echo -echo "Done!" diff --git a/start_windows.bat b/start_windows.bat index 063942fe..64cbd835 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -59,8 +59,5 @@ call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( @rem setup installer env call python webui.py -echo. -echo Done! - :end pause diff --git a/webui.py b/webui.py index c0cfadc0..bd9f9859 100644 --- a/webui.py +++ b/webui.py @@ -11,15 +11,26 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") # Use this to set your command-line flags. 
For the full list, see: # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui -CMD_FLAGS = '--chat --model-menu' +CMD_FLAGS = '--chat' # Allows users to set flags in "OOBABOOGA_FLAGS" environment variable if "OOBABOOGA_FLAGS" in os.environ: CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] - print("\33[1;32mFlags have been taken from enivroment Variable 'OOBABOOGA_FLAGS'\33[0m") + print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':") print(CMD_FLAGS) - print("\33[1;32mTo use flags from webui.py remove 'OOBABOOGA_FLAGS'\33[0m") + print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n") + + +def print_big_message(message): + message = message.strip() + lines = message.split('\n') + print("\n\n*******************************************************************") + for line in lines: + if line.strip() != '': + print("*", line) + + print("*******************************************************************\n\n") def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): @@ -31,14 +42,15 @@ def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, else: conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd - + # Run shell commands result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) - + # Assert the command ran successfully if assert_success and result.returncode != 0: print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...") sys.exit() + return result @@ -48,7 +60,7 @@ def check_env(): if not conda_exist: print("Conda is not installed. Exiting...") sys.exit() - + # Ensure this is a new environment and not the base environment if os.environ["CONDA_DEFAULT_ENV"] == "base": print("Create an environment for this project and activate it. 
Exiting...") @@ -86,7 +98,7 @@ def install_dependencies(): # if sys.platform.startswith("win"): # # Fix a bitsandbytes compatibility issue with Windows # run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True) - + # Install the webui dependencies update_dependencies() @@ -101,7 +113,7 @@ def update_dependencies(): for extension in extensions: if extension in ['superbooga']: # No wheels available for dependencies continue - + extension_req_path = os.path.join("extensions", extension, "requirements.txt") if os.path.exists(extension_req_path): run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) @@ -113,12 +125,14 @@ def update_dependencies(): old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl" if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')): old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0 - print("\n\nWARNING: GPU with compute < 7.0 detected!") + message = "\n\nWARNING: GPU with compute < 7.0 detected!\n" if old_bnb_install: - print("Older version of bitsandbytes has been installed to maintain compatibility.") - print("You will be unable to use --load-in-4bit!\n\n") + message += "Older version of bitsandbytes has been installed to maintain compatibility.\n" + message += "You will be unable to use --load-in-4bit!\n" else: - print("You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n\n") + message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n" + + print_big_message(message) # The following dependencies are for CUDA, not CPU # Check if the package cpuonly exists to determine if torch uses CUDA or not @@ -143,16 +157,16 @@ def update_dependencies(): if not os.path.exists("repositories/"): os.mkdir("repositories") - + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization os.chdir("repositories") if not os.path.exists("GPTQ-for-LLaMa/"): run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) - + # Install GPTQ-for-LLaMa dependencies os.chdir("GPTQ-for-LLaMa") run_cmd("git pull", assert_success=True, environment=True) - + # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa if sys.platform.startswith("linux"): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) @@ -163,22 +177,18 @@ def update_dependencies(): # Compile and install GPTQ-for-LLaMa if os.path.exists('setup_cuda.py'): os.rename("setup_cuda.py", "setup.py") - + run_cmd("python -m pip install .", environment=True) - + # Wheel installation can fail while in the build directory of a package with the same name os.chdir("..") - + # If the path does not exist, then the install failed quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") if not glob.glob(quant_cuda_path_regex): # Attempt installation via alternative, Windows/Linux-specific method if sys.platform.startswith("win") or sys.platform.startswith("linux"): - print("\n\n*******************************************************************") - print("* WARNING: GPTQ-for-LLaMa compilation 
failed, but this is FINE and can be ignored!") - print("* The installer will proceed to install a pre-compiled wheel.") - print("*******************************************************************\n\n") - + print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.") url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl" if sys.platform.startswith("linux"): url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl" @@ -191,7 +201,7 @@ def update_dependencies(): else: print("ERROR: GPTQ CUDA kernel compilation failed.") print("You will not be able to use GPTQ-based models.") - + print("Continuing with install..") @@ -200,7 +210,7 @@ def download_model(): run_cmd("python download-model.py", environment=True) -def run_model(): +def launch_webui(): os.chdir("text-generation-webui") run_cmd(f"python server.py {CMD_FLAGS}", environment=True) @@ -223,13 +233,12 @@ if __name__ == "__main__": # Check if a model has been downloaded yet if len(glob.glob("text-generation-webui/models/*/")) == 0: - download_model() - os.chdir(script_dir) + print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.") # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist conda_path_bin = os.path.join(conda_env_path, "bin") if not os.path.exists(conda_path_bin): os.mkdir(conda_path_bin) - # Run the model with webui - run_model() + # Launch the webui + launch_webui() From 248ef323588cfc4757be22c5dcc5292929a60bd2 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 1 Jun 2023 01:38:48 -0300 Subject: [PATCH 074/133] Print a big message for CPU users --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index bd9f9859..90dfa1e4 100644 --- a/webui.py +++ b/webui.py @@ -79,7 +79,7 @@ def install_dependencies(): gpuchoice = input("Input> ").lower() if gpuchoice == "d": - print("\nOnce the installation ends, make sure to open webui.py with a text editor and add the --cpu flag to CMD_FLAGS.\n") + print_big_message("Once the installation ends, make sure to open webui.py with a text editor\nand add the --cpu flag to CMD_FLAGS.") # Install the version of PyTorch needed if gpuchoice == "a": From 55403358190a461e2751f85b4f94162aca27892f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:01:19 -0300 Subject: [PATCH 075/133] Better way to detect if a model has been downloaded --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 90dfa1e4..00b0fbd6 100644 --- a/webui.py +++ b/webui.py @@ -232,7 +232,7 @@ if __name__ == "__main__": os.chdir(script_dir) # Check if a model has been downloaded yet - if len(glob.glob("text-generation-webui/models/*/")) == 0: + if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0: print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.") # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist From 522b01d051ea7372bd56cb3ac3de044acf6fc313 Mon Sep 17 00:00:00 2001 From: 
oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:05:29 -0300 Subject: [PATCH 076/133] Grammar --- INSTRUCTIONS.TXT | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index eaf24ef9..c25e4a96 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -6,7 +6,7 @@ Run the "start" script. If all goes right, it should take care of everything for you. To launch the web UI in the future after it is already installed, run -this same "start" script. +the same "start" script. # Updating the web UI From 53496ffa80a33fdf897b1235d3d7326d74c64946 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 5 Jun 2023 17:15:31 -0300 Subject: [PATCH 077/133] Create stale.yml --- .github/workflows/stale.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000..ce603a4f --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,22 @@ +name: Close inactive issues +on: + schedule: + - cron: "10 23 * * *" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v5 + with: + stale-issue-message: "" + close-issue-message: "This issue has been closed due to inactivity for 30 days. If you believe it is still relevant, please leave a comment below." + days-before-issue-stale: 30 + days-before-issue-close: 0 + stale-issue-label: "stale" + days-before-pr-stale: -1 + days-before-pr-close: -1 + repo-token: ${{ secrets.GITHUB_TOKEN }} From c42f183d3f7c4518a0ce846eac7935613a6af44b Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Mon, 12 Jun 2023 22:04:15 -0500 Subject: [PATCH 078/133] Installer for WSL (#78) --- INSTRUCTIONS-WSL.TXT | 74 ++++++++++++++++++++++++++++++++++++ cmd_wsl.bat | 11 ++++++ generate_zips.sh | 7 ++-- start_wsl.bat | 11 ++++++ update_wsl.bat | 11 ++++++ wsl.sh | 89 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 200 insertions(+), 3 deletions(-) create mode 100644 INSTRUCTIONS-WSL.TXT create mode 100644 cmd_wsl.bat create mode 100644 start_wsl.bat create mode 100644 update_wsl.bat create mode 100644 wsl.sh diff --git a/INSTRUCTIONS-WSL.TXT b/INSTRUCTIONS-WSL.TXT new file mode 100644 index 00000000..b2dabdd3 --- /dev/null +++ b/INSTRUCTIONS-WSL.TXT @@ -0,0 +1,74 @@ +Thank you for downloading oobabooga/text-generation-webui. + +# WSL setup + +If you do not have WSL installed, see here: +https://learn.microsoft.com/en-us/windows/wsl/install + +If you want to install Linux to a drive other than C +Open powershell and enter these commands: + +cd D:\Path\To\Linux +$ProgressPreference = 'SilentlyContinue' +Invoke-WebRequest -Uri -OutFile Linux.appx -UseBasicParsing +mv Linux.appx Linux.zip + +Then open Linux.zip and you should see several .appx files inside. +The one with _x64.appx contains the exe installer that you need. +Extract the contents of that _x64.appx file and run .exe to install. 
+ +Linux Distro URLs: +https://learn.microsoft.com/en-us/windows/wsl/install-manual#downloading-distributions + +****************************************************************************** +*ENSURE THAT THE WSL LINUX DISTRO THAT YOU WISH TO USE IS SET AS THE DEFAULT!* +****************************************************************************** + +Do this by using these commands: +wsl -l +wsl -s + +# Web UI Installation + +Run the "start" script. By default it will install the web UI in WSL: +/home/{username}/text-gen-install + +To launch the web UI in the future after it is already installed, run +the same "start" script. Ensure that webui.py and wsl.sh are next to it! + +# Updating the web UI + +Run the "update" script. This will only install the updates, so it should +be much faster than the initial installation. + +You can also run "wsl.sh update" in WSL. + +# Adding flags like --chat, --notebook, etc + +Edit the "webui.py" script using a text editor and add the desired flags +to the CMD_FLAGS variable at the top. It should look like this: + +CMD_FLAGS = '--chat' + +For instance, to add the --api flag, change it to + +CMD_FLAGS = '--chat --api' + +The "start" and "update" scripts will copy the edited "webui.py" to WSL +to be used by the web UI. + +# Running an interactive shell + +To run an interactive shell in the miniconda environment, run the "cmd" +script. This is useful for installing additional requirements manually. + +You can also run "wsl.sh cmd" in WSL. + +# Changing the default install location + +To change this, you will need to edit the scripts as follows: +wsl.sh: line ~22 INSTALL_DIR="/path/to/install/dir" + +Keep in mind that there is a long-standing bug in WSL that significantly +slows drive read/write speeds when using a physical drive as opposed to +the virtual one that Linux is installed in. 
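
As a concrete sketch of the install-location edit described above (the directory
below is only an illustration; any folder inside the Linux filesystem will do),
line ~22 of wsl.sh could be changed from its default

# default: install under the WSL user's home directory
INSTALL_DIR="$HOME/text-gen-install"

to something like

# example only: keep the install inside the virtual drive, not under /mnt/*
INSTALL_DIR="$HOME/projects/text-gen-install"
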
diff --git a/cmd_wsl.bat b/cmd_wsl.bat new file mode 100644 index 00000000..f9f4348a --- /dev/null +++ b/cmd_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd" + +:end +pause diff --git a/generate_zips.sh b/generate_zips.sh index 1bfe7247..2f852182 100644 --- a/generate_zips.sh +++ b/generate_zips.sh @@ -1,5 +1,6 @@ -mkdir oobabooga_{windows,linux,macos} -for p in windows macos linux; do - cp {*$p*\.*,webui.py,INSTRUCTIONS.TXT} oobabooga_$p; +mkdir oobabooga_{windows,linux,macos,wsl} +for p in windows macos linux wsl; do + if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.TXT} oobabooga_$p; + else cp {*$p*\.*,webui.py,INSTRUCTIONS.TXT} oobabooga_$p; fi zip -r oobabooga_$p.zip oobabooga_$p; done diff --git a/start_wsl.bat b/start_wsl.bat new file mode 100644 index 00000000..41fa572f --- /dev/null +++ b/start_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh" + +:end +pause diff --git a/update_wsl.bat b/update_wsl.bat new file mode 100644 index 00000000..36d019a8 --- /dev/null +++ b/update_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script calling wsl.sh with 'update' will run updater +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh update" + +:end +pause diff --git a/wsl.sh b/wsl.sh new file mode 100644 index 00000000..b89c788c --- /dev/null +++ b/wsl.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# detect if build-essential is missing or broken +if ! dpkg-query -W -f'${Status}' "build-essential" 2>/dev/null | grep -q "ok installed"; then +echo "build-essential not found or broken! + +A C++ compiler is required to build needed Python packages! +To install one, run cmd_wsl.bat and enter these commands: + +sudo apt-get update +sudo apt-get install build-essential +" +read -n1 -p "Continue the installer anyway? [y,n]" EXIT_PROMPT +# only continue if user inputs 'y' else exit +if ! [[ $EXIT_PROMPT == "Y" || $EXIT_PROMPT == "y" ]]; then exit; fi +fi + +# deactivate any currently active conda env +conda deactivate 2> /dev/null + +# config unlike other scripts, can't use current directory due to file IO bug in WSL, needs to be in virtual drive +INSTALL_DIR="$HOME/text-gen-install" +CONDA_ROOT_PREFIX="$INSTALL_DIR/installer_files/conda" +INSTALL_ENV_DIR="$INSTALL_DIR/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Linux-x86_64.sh" +conda_exists="F" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# /usr/lib/wsl/lib needs to be added to LD_LIBRARY_PATH to fix years-old bug in WSL where GPU drivers aren't linked properly +export LD_LIBRARY_PATH="$CUDA_HOME/lib:/usr/lib/wsl/lib:$LD_LIBRARY_PATH" + +# open bash cli if called with 'wsl.sh cmd' with workarounds for existing conda +if [ "$1" == "cmd" ]; then + exec bash --init-file <(echo ". 
~/.bashrc; conda deactivate 2> /dev/null; cd $INSTALL_DIR || cd $HOME; source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh; conda activate $INSTALL_ENV_DIR") + exit +fi + +if [[ "$INSTALL_DIR" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# create install dir if missing and copy webui.py to install dir to maintain functionality without edit +if [ ! -d "$INSTALL_DIR" ]; then mkdir -p "$INSTALL_DIR" || exit; fi +cp -u "./webui.py" "$INSTALL_DIR" + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +cd $INSTALL_DIR + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." + exit +fi + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env update env if called with 'wsl.sh update' +case "$1" in +("update") python webui.py --update;; +(*) python webui.py;; +esac From b2483e28d1608bcb78a69c293fa9f14206cbc599 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 17 Jun 2023 17:09:22 -0500 Subject: [PATCH 079/133] Check for special characters in path on Windows (#81) Display warning message if detected --- webui.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/webui.py b/webui.py index 00b0fbd6..52993184 100644 --- a/webui.py +++ b/webui.py @@ -68,6 +68,15 @@ def check_env(): def install_dependencies(): + # Check for special characters in installation path on Windows + if sys.platform.startswith("win"): + # punctuation contains: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ + from string import punctuation + # Allow some characters: _-:\/.'" + special_characters = punctuation.translate({ord(char): None for char in '_-:\\/.\'"'}) + if any(char in script_dir for char in special_characters): + print_big_message("WARNING: Special characters were detected in the installation path!\n This can cause the installation to fail!") + # Select your GPU or, choose to run in CPU mode print("What is your GPU") print() From 657049d7d01097c2e23508664d25a74374fc0aa1 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 17 Jun 2023 17:09:42 -0500 Subject: [PATCH 080/133] Fix cmd_macos.sh (#82) MacOS version of Bash does not support process substitution --- cmd_macos.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd_macos.sh b/cmd_macos.sh index 0a4ef620..0cec16e9 100644 --- a/cmd_macos.sh +++ b/cmd_macos.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies 
on Miniconda which can not be silently installed under a path with spaces. && exit; fi +# deactivate existing env if needed +conda deactivate 2> /dev/null + # config CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" @@ -16,4 +19,6 @@ export CUDA_PATH="$INSTALL_ENV_DIR" export CUDA_HOME="$CUDA_PATH" # activate env -bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") +source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh +conda activate $INSTALL_ENV_DIR +exec bash --norc From b1d05cbbf66873210a8e0868825c1de5a38d27ef Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 17 Jun 2023 17:10:36 -0500 Subject: [PATCH 081/133] Install exllama (#83) * Install exllama * Handle updating exllama --- webui.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 52993184..12e86571 100644 --- a/webui.py +++ b/webui.py @@ -167,8 +167,17 @@ def update_dependencies(): if not os.path.exists("repositories/"): os.mkdir("repositories") - # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization os.chdir("repositories") + + # Install or update exllama as needed + if not os.path.exists("exllama/"): + run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True) + else: + os.chdir("exllama") + run_cmd("git pull", environment=True) + os.chdir("..") + + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization if not os.path.exists("GPTQ-for-LLaMa/"): run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) From 9bb2fc8cd782be5930b6789f2171cb73effc9be3 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Tue, 20 Jun 2023 14:39:23 -0500 Subject: [PATCH 082/133] Install Pytorch through pip instead of Conda (#84) --- start_linux.sh | 2 +- start_macos.sh | 2 +- start_windows.bat | 2 +- webui.py | 24 ++++++++++++++---------- wsl.sh | 2 +- 5 files changed, 18 insertions(+), 14 deletions(-) diff --git a/start_linux.sh b/start_linux.sh index e699a836..dc37f612 100644 --- a/start_linux.sh +++ b/start_linux.sh @@ -16,7 +16,7 @@ esac INSTALL_DIR="$(pwd)/installer_files" CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" -MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Linux-${OS_ARCH}.sh" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-${OS_ARCH}.sh" conda_exists="F" # figure out whether git and conda needs to be installed diff --git a/start_macos.sh b/start_macos.sh index cd4aaa9e..a813edb3 100644 --- a/start_macos.sh +++ b/start_macos.sh @@ -16,7 +16,7 @@ esac INSTALL_DIR="$(pwd)/installer_files" CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" -MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-MacOSX-${OS_ARCH}.sh" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-MacOSX-${OS_ARCH}.sh" conda_exists="F" # figure out whether git and conda needs to be installed diff --git a/start_windows.bat b/start_windows.bat index 64cbd835..f259f606 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -14,7 +14,7 @@ set TEMP=%cd%\installer_files set INSTALL_DIR=%cd%\installer_files set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env -set 
MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Windows-x86_64.exe +set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Windows-x86_64.exe set conda_exists=F @rem figure out whether git and conda needs to be installed diff --git a/webui.py b/webui.py index 12e86571..f5bd671e 100644 --- a/webui.py +++ b/webui.py @@ -92,21 +92,18 @@ def install_dependencies(): # Install the version of PyTorch needed if gpuchoice == "a": - run_cmd("conda install -y -k pytorch[version=2,build=py3.10_cuda11.7*] torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit ninja git -c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia", assert_success=True, environment=True) + run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) elif gpuchoice == "b": print("AMD GPUs are not supported. Exiting...") sys.exit() elif gpuchoice == "c" or gpuchoice == "d": - run_cmd("conda install -y -k pytorch torchvision torchaudio cpuonly git -c pytorch", assert_success=True, environment=True) + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) else: print("Invalid choice. Exiting...") sys.exit() # Clone webui to our computer run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) - # if sys.platform.startswith("win"): - # # Fix a bitsandbytes compatibility issue with Windows - # run_cmd("python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl", assert_success=True, environment=True) # Install the webui dependencies update_dependencies() @@ -144,9 +141,12 @@ def update_dependencies(): print_big_message(message) # The following dependencies are for CUDA, not CPU - # Check if the package cpuonly exists to determine if torch uses CUDA or not - cpuonly_exist = run_cmd("conda list cpuonly | grep cpuonly", environment=True, capture_output=True).returncode == 0 - if cpuonly_exist: + # Parse output of 'pip show torch' to determine torch version + torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) + torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0] + + # Check for '+cu' in version string to determine if torch uses CUDA or not check for pytorch-cuda as well for backwards compatibility + if '+cu' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: return # Finds the path to your dependencies @@ -161,8 +161,8 @@ def update_dependencies(): sys.exit() # Fix a bitsandbytes compatibility issue with Linux - if sys.platform.startswith("linux"): - shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so")) + # if sys.platform.startswith("linux"): + # shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so")) if not os.path.exists("repositories/"): os.mkdir("repositories") @@ -177,6 +177,10 @@ def update_dependencies(): run_cmd("git pull", environment=True) os.chdir("..") + # Fix build issue with 
exllama in Linux/WSL + if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): + run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization if not os.path.exists("GPTQ-for-LLaMa/"): run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) diff --git a/wsl.sh b/wsl.sh index b89c788c..2d5d5405 100644 --- a/wsl.sh +++ b/wsl.sh @@ -22,7 +22,7 @@ conda deactivate 2> /dev/null INSTALL_DIR="$HOME/text-gen-install" CONDA_ROOT_PREFIX="$INSTALL_DIR/installer_files/conda" INSTALL_ENV_DIR="$INSTALL_DIR/installer_files/env" -MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-Linux-x86_64.sh" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-x86_64.sh" conda_exists="F" # environment isolation From 5cbc0b28f2d173887cfc213102e2e60fb784452a Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Tue, 20 Jun 2023 18:21:10 -0500 Subject: [PATCH 083/133] Workaround for Peft not updating their package version on the git repo (#88) * Workaround for Peft not updating their git package version * Update webui.py --------- Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com> --- webui.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/webui.py b/webui.py index f5bd671e..0c224958 100644 --- a/webui.py +++ b/webui.py @@ -113,6 +113,21 @@ def update_dependencies(): os.chdir("text-generation-webui") run_cmd("git pull", assert_success=True, environment=True) + # Workaround for git+ packages not updating properly + with open("requirements.txt") as f: + requirements = f.read().splitlines() + git_requirements = [req for req in requirements if req.startswith("git+")] + + # Loop through each "git+" requirement and uninstall it + for req in git_requirements: + # Extract the package name from the "git+" requirement + url = req.replace("git+", "") + package_name = url.split("/")[-1].split("@")[0] + + # Uninstall the package using pip + run_cmd("python -m pip uninstall " + package_name, environment=True) + print(f"Uninstalled {package_name}") + # Installs/Updates dependencies from all requirements.txt run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) extensions = next(os.walk("extensions"))[1] From c0a1baa46e849b6afec247f0d582fb58b8d749d1 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 20 Jun 2023 20:23:21 -0300 Subject: [PATCH 084/133] Minor changes --- webui.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/webui.py b/webui.py index 0c224958..f551861d 100644 --- a/webui.py +++ b/webui.py @@ -1,7 +1,6 @@ import argparse import glob import os -import shutil import site import subprocess import sys @@ -72,6 +71,7 @@ def install_dependencies(): if sys.platform.startswith("win"): # punctuation contains: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ from string import punctuation + # Allow some characters: _-:\/.'" special_characters = punctuation.translate({ord(char): None for char in '_-:\\/.\'"'}) if any(char in script_dir for char in special_characters): @@ -117,13 +117,13 @@ def update_dependencies(): with open("requirements.txt") as f: requirements = f.read().splitlines() git_requirements = [req for req in requirements if req.startswith("git+")] - + # Loop through each "git+" 
requirement and uninstall it for req in git_requirements: # Extract the package name from the "git+" requirement url = req.replace("git+", "") package_name = url.split("/")[-1].split("@")[0] - + # Uninstall the package using pip run_cmd("python -m pip uninstall " + package_name, environment=True) print(f"Uninstalled {package_name}") @@ -159,7 +159,7 @@ def update_dependencies(): # Parse output of 'pip show torch' to determine torch version torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0] - + # Check for '+cu' in version string to determine if torch uses CUDA or not check for pytorch-cuda as well for backwards compatibility if '+cu' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: return @@ -183,7 +183,7 @@ def update_dependencies(): os.mkdir("repositories") os.chdir("repositories") - + # Install or update exllama as needed if not os.path.exists("exllama/"): run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True) @@ -191,11 +191,11 @@ def update_dependencies(): os.chdir("exllama") run_cmd("git pull", environment=True) os.chdir("..") - + # Fix build issue with exllama in Linux/WSL if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) - + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization if not os.path.exists("GPTQ-for-LLaMa/"): run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) From a2116e8b2ba661ba0ffc2b917b3c32135c0bfda4 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 20 Jun 2023 21:24:01 -0300 Subject: [PATCH 085/133] use uninstall -y --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index f551861d..89bfee0f 100644 --- a/webui.py +++ b/webui.py @@ -125,7 +125,7 @@ def update_dependencies(): package_name = url.split("/")[-1].split("@")[0] # Uninstall the package using pip - run_cmd("python -m pip uninstall " + package_name, environment=True) + run_cmd("python -m pip uninstall -y" + package_name, environment=True) print(f"Uninstalled {package_name}") # Installs/Updates dependencies from all requirements.txt From 80a615c3aeac14e9cb2d0f1e4b4dff7cd2ad7f6f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 20 Jun 2023 22:48:45 -0300 Subject: [PATCH 086/133] Add space --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 89bfee0f..e2c6093d 100644 --- a/webui.py +++ b/webui.py @@ -125,7 +125,7 @@ def update_dependencies(): package_name = url.split("/")[-1].split("@")[0] # Uninstall the package using pip - run_cmd("python -m pip uninstall -y" + package_name, environment=True) + run_cmd("python -m pip uninstall -y " + package_name, environment=True) print(f"Uninstalled {package_name}") # Installs/Updates dependencies from all requirements.txt From d1da22d7ee3de31090e70a552f108d5ba50d3502 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Tue, 20 Jun 2023 20:48:59 -0500 Subject: [PATCH 087/133] Fix -y from previous commit (#90) From 04cae3e5db554a21472cd32c43be279796795bab Mon Sep 17 00:00:00 2001 From: jllllll 
<3887729+jllllll@users.noreply.github.com> Date: Wed, 21 Jun 2023 13:40:41 -0500 Subject: [PATCH 088/133] Remove bitsandbytes compatibility workaround (#91) New bnb does not need it. Commented out in case it is needed in the futute. --- webui.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/webui.py b/webui.py index e2c6093d..5a13ca56 100644 --- a/webui.py +++ b/webui.py @@ -140,20 +140,20 @@ def update_dependencies(): run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) # Latest bitsandbytes requires minimum compute 7.0 - nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" - min_compute = 70 - compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) - old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl" - if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')): - old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0 - message = "\n\nWARNING: GPU with compute < 7.0 detected!\n" - if old_bnb_install: - message += "Older version of bitsandbytes has been installed to maintain compatibility.\n" - message += "You will be unable to use --load-in-4bit!\n" - else: - message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n" + # nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" + # min_compute = 70 + # compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) + # old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl" + # if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')): + # old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0 + # message = "\n\nWARNING: GPU with compute < 7.0 detected!\n" + # if old_bnb_install: + # message += "Older version of bitsandbytes has been installed to maintain compatibility.\n" + # message += "You will be unable to use --load-in-4bit!\n" + # else: + # message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n" - print_big_message(message) + # print_big_message(message) # The following dependencies are for CUDA, not CPU # Parse output of 'pip show torch' to determine torch version From eac8450ef7aab37f527ef3c76a1ed183739c2bc0 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 24 Jun 2023 08:06:35 -0500 Subject: [PATCH 089/133] Move special character check to start script (#92) Also port print_big_message function to batch --- start_windows.bat | 22 ++++++++++++++++++++-- webui.py | 10 ---------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/start_windows.bat b/start_windows.bat index f259f606..4f5b0ba5 100644 --- a/start_windows.bat +++ b/start_windows.bat @@ -6,6 +6,13 @@ set PATH=%PATH%;%SystemRoot%\system32 echo "%CD%"| findstr /C:" " >nul && echo 
This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end +@rem Check for special characters in installation path +set "SPCHARMESSAGE="WARNING: Special characters were detected in the installation path!" " This can cause the installation to fail!"" +echo "%CD%"| findstr /R /C:"[!#\$%&()\*+,;<=>?@\[\]\^`{|}~]" >nul && ( + call :PrintBigMessage %SPCHARMESSAGE% +) +set SPCHARMESSAGE= + @rem fix failed install when installing to a separate drive set TMP=%cd%\installer_files set TEMP=%cd%\installer_files @@ -39,8 +46,8 @@ if "%conda_exists%" == "F" ( @rem create the installer env if not exist "%INSTALL_ENV_DIR%" ( - echo Packages to install: %PACKAGES_TO_INSTALL% - call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end ) + echo Packages to install: %PACKAGES_TO_INSTALL% + call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end ) ) @rem check if conda environment was actually created @@ -59,5 +66,16 @@ call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( @rem setup installer env call python webui.py +@rem below are functions for the script next line skips these during normal execution +goto end + +:PrintBigMessage +echo. && echo. +echo ******************************************************************* +for %%M in (%*) do echo * %%~M +echo ******************************************************************* +echo. && echo. +exit /b + :end pause diff --git a/webui.py b/webui.py index 5a13ca56..fe92c328 100644 --- a/webui.py +++ b/webui.py @@ -67,16 +67,6 @@ def check_env(): def install_dependencies(): - # Check for special characters in installation path on Windows - if sys.platform.startswith("win"): - # punctuation contains: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ - from string import punctuation - - # Allow some characters: _-:\/.'" - special_characters = punctuation.translate({ord(char): None for char in '_-:\\/.\'"'}) - if any(char in script_dir for char in special_characters): - print_big_message("WARNING: Special characters were detected in the installation path!\n This can cause the installation to fail!") - # Select your GPU or, choose to run in CPU mode print("What is your GPU") print() From 564a8c507fffc8b25a056d8930035c63da71fc7b Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 7 Jul 2023 13:32:11 -0300 Subject: [PATCH 090/133] Don't launch chat mode by default --- webui.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index fe92c328..004752a9 100644 --- a/webui.py +++ b/webui.py @@ -10,7 +10,8 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") # Use this to set your command-line flags. 
For the full list, see: # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui -CMD_FLAGS = '--chat' +# Example: CMD_FLAGS = '--chat --listen' +CMD_FLAGS = '' # Allows users to set flags in "OOBABOOGA_FLAGS" environment variable From bb79037ebd2d602614a6a19ff28ac4fcc98e1ded Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 7 Jul 2023 20:40:31 -0300 Subject: [PATCH 091/133] Fix wrong pytorch version on Linux+CPU It was installing nvidia wheels --- webui.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 004752a9..25c2d367 100644 --- a/webui.py +++ b/webui.py @@ -87,8 +87,14 @@ def install_dependencies(): elif gpuchoice == "b": print("AMD GPUs are not supported. Exiting...") sys.exit() - elif gpuchoice == "c" or gpuchoice == "d": + elif gpuchoice == "c": run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) + elif gpuchoice == "d": + if sys.platform.startswith("linux"): + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True) + else: + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) + else: print("Invalid choice. Exiting...") sys.exit() From 11a8fd1eb9f98489cbf111601b3bf317f8f79322 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 15 Jul 2023 23:31:33 -0500 Subject: [PATCH 092/133] Add cuBLAS llama-cpp-python wheel installation (#102) Parses requirements.txt using regex to determine required version. --- webui.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index 25c2d367..5049f700 100644 --- a/webui.py +++ b/webui.py @@ -1,5 +1,6 @@ import argparse import glob +import re import os import site import subprocess @@ -110,10 +111,10 @@ def update_dependencies(): os.chdir("text-generation-webui") run_cmd("git pull", assert_success=True, environment=True) - # Workaround for git+ packages not updating properly + # Workaround for git+ packages not updating properly Also store requirements.txt for later use with open("requirements.txt") as f: - requirements = f.read().splitlines() - git_requirements = [req for req in requirements if req.startswith("git+")] + textgen_requirements = f.read() + git_requirements = [req for req in textgen_requirements.splitlines() if req.startswith("git+")] # Loop through each "git+" requirement and uninstall it for req in git_requirements: @@ -161,6 +162,12 @@ def update_dependencies(): if '+cu' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: return + # Install llama-cpp-python built with cuBLAS support for NVIDIA GPU acceleration + if '+cu' in torver: + llama_cpp = re.search('(?<=llama-cpp-python==)\d+(?:\.\d+)*', textgen_requirements) + if llama_cpp is not None: + run_cmd(f'python -m pip install llama-cpp-python=={llama_cpp[0]} --force-reinstall --no-deps --index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117', environment=True) + # Finds the path to your dependencies for sitedir in site.getsitepackages(): if "site-packages" in sitedir: From 4df3f72753b2f54eb88079f8cbbdaf92ef17c849 Mon Sep 17 00:00:00 2001 From: jllllll 
<3887729+jllllll@users.noreply.github.com> Date: Wed, 19 Jul 2023 20:25:09 -0500 Subject: [PATCH 093/133] Fix GPTQ fail message not being shown on update (#103) --- webui.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index 5049f700..5b542516 100644 --- a/webui.py +++ b/webui.py @@ -219,16 +219,17 @@ def update_dependencies(): if os.path.exists('setup_cuda.py'): os.rename("setup_cuda.py", "setup.py") - run_cmd("python -m pip install .", environment=True) + build_gptq = run_cmd("python -m pip install .", environment=True).returncode == 0 # Wheel installation can fail while in the build directory of a package with the same name os.chdir("..") # If the path does not exist, then the install failed quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") - if not glob.glob(quant_cuda_path_regex): + quant_cuda_path = glob.glob(quant_cuda_path_regex) + if not build_gptq: # Attempt installation via alternative, Windows/Linux-specific method - if sys.platform.startswith("win") or sys.platform.startswith("linux"): + if (sys.platform.startswith("win") or sys.platform.startswith("linux")) and not quant_cuda_path: print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.") url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl" if sys.platform.startswith("linux"): @@ -239,6 +240,8 @@ def update_dependencies(): print("Wheel installation success!") else: print("ERROR: GPTQ wheel installation failed. You will not be able to use GPTQ-based models.") + elif quant_cuda_path: + print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nquant_cuda has already been installed.") else: print("ERROR: GPTQ CUDA kernel compilation failed.") print("You will not be able to use GPTQ-based models.") From fcb215fed508b544671aeb8c81fd2c86cd977bfa Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 20 Jul 2023 09:11:00 -0500 Subject: [PATCH 094/133] Add check for compute support for GPTQ-for-LLaMa (#104) Installs from main cuda repo if fork not supported Also removed cuBLAS llama-cpp-python installation in preperation for https://github.com/oobabooga/text-generation-webui/commit/4b19b74e6c8d9c99634e16774d3ebcb618ba7a18 --- webui.py | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/webui.py b/webui.py index 5b542516..0304e09f 100644 --- a/webui.py +++ b/webui.py @@ -137,22 +137,6 @@ def update_dependencies(): if os.path.exists(extension_req_path): run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) - # Latest bitsandbytes requires minimum compute 7.0 - # nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" - # min_compute = 70 - # compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) - # old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl" - # if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')): - # old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall 
--no-deps", environment=True).returncode == 0 - # message = "\n\nWARNING: GPU with compute < 7.0 detected!\n" - # if old_bnb_install: - # message += "Older version of bitsandbytes has been installed to maintain compatibility.\n" - # message += "You will be unable to use --load-in-4bit!\n" - # else: - # message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n" - - # print_big_message(message) - # The following dependencies are for CUDA, not CPU # Parse output of 'pip show torch' to determine torch version torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) @@ -162,11 +146,9 @@ def update_dependencies(): if '+cu' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: return - # Install llama-cpp-python built with cuBLAS support for NVIDIA GPU acceleration - if '+cu' in torver: - llama_cpp = re.search('(?<=llama-cpp-python==)\d+(?:\.\d+)*', textgen_requirements) - if llama_cpp is not None: - run_cmd(f'python -m pip install llama-cpp-python=={llama_cpp[0]} --force-reinstall --no-deps --index-url=https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/AVX2/cu117', environment=True) + # Get GPU CUDA/compute support + nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" + compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) # Finds the path to your dependencies for sitedir in site.getsitepackages(): @@ -200,9 +182,17 @@ def update_dependencies(): if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) + # oobabooga fork requires min compute of 6.0 + gptq_min_compute = 60 + gptq_min_compute_check = any(int(compute) >= gptq_min_compute for compute in compute_array.stdout.decode('utf-8').split(',')) if compute_array.returncode == 0 else False + # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization if not os.path.exists("GPTQ-for-LLaMa/"): - run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) + # Install oobabooga fork if min compute met or if failed to check + if gptq_min_compute_check or compute_array.returncode != 0: + run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) + else: + run_cmd("git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) # Install GPTQ-for-LLaMa dependencies os.chdir("GPTQ-for-LLaMa") @@ -231,9 +221,8 @@ def update_dependencies(): # Attempt installation via alternative, Windows/Linux-specific method if (sys.platform.startswith("win") or sys.platform.startswith("linux")) and not quant_cuda_path: print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.") - url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl" - if sys.platform.startswith("linux"): - url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl" + wheel = f"{'' if gptq_min_compute_check or compute_array.returncode != 0 else '832e220d6dbf11bec5eaa8b221a52c1c854d2a25/'}quant_cuda-0.0.0-cp310-cp310-{'linux_x86_64' if 
sys.platform.startswith('linux') else 'win_amd64'}.whl" + url = f"https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/{'Linux-x64' if sys.platform.startswith('linux') else 'main'}/" + wheel result = run_cmd("python -m pip install " + url, environment=True) if result.returncode == 0: From cc2ed46d44cb8aed982c2a13ccfe6b27c06b6068 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 20 Jul 2023 18:55:09 -0300 Subject: [PATCH 095/133] Make chat the default again --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 0304e09f..080e94c1 100644 --- a/webui.py +++ b/webui.py @@ -12,7 +12,7 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") # Use this to set your command-line flags. For the full list, see: # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui # Example: CMD_FLAGS = '--chat --listen' -CMD_FLAGS = '' +CMD_FLAGS = '--chat' # Allows users to set flags in "OOBABOOGA_FLAGS" environment variable From 52e3b91f5e8092f7a49b21544acac52f35ec5387 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Tue, 25 Jul 2023 23:55:08 -0500 Subject: [PATCH 096/133] Fix broken gxx_linux-64 package. (#106) --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 080e94c1..e534ab22 100644 --- a/webui.py +++ b/webui.py @@ -203,7 +203,7 @@ def update_dependencies(): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: # Install the correct version of g++ - run_cmd("conda install -y -k gxx_linux-64=11.2.0", environment=True) + run_cmd("conda install -y -k gxx_linux-64=11.2.0 -c conda-forge", environment=True) # Compile and install GPTQ-for-LLaMa if os.path.exists('setup_cuda.py'): From 1e3c950c7d8137e4e87e5399cb9cdebc688e709f Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:33:02 -0500 Subject: [PATCH 097/133] Add AMD GPU support for Linux (#98) --- INSTRUCTIONS.TXT | 9 +++++ webui.py | 102 ++++++++++++++++++++++++++++++++--------------- 2 files changed, 79 insertions(+), 32 deletions(-) diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT index c25e4a96..c93924d6 100644 --- a/INSTRUCTIONS.TXT +++ b/INSTRUCTIONS.TXT @@ -28,3 +28,12 @@ CMD_FLAGS = '--chat --api' To run an interactive shell in the miniconda environment, run the "cmd" script. This is useful for installing additional requirements manually. + +# Using an AMD GPU in Linux + +Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. +Some systems may also need: sudo apt-get install libstdc++-12-dev + +Edit the "webui.py" script using a text editor and un-comment and modify the +lines near the top of the script according to your setup. In particular, modify +the os.environ["ROCM_PATH"] = '/opt/rocm' line to point to your ROCm installation. 
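
As a sketch of that edit, the block near the top of webui.py would end up looking
like the lines below. The values shown are only the defaults from the commented-out
lines; the GFX version override and AMDGPU target in particular usually have to be
adjusted to match your specific card and ROCm installation.

# Un-commented AMD/ROCm settings (example values; adjust for your own GPU)
os.environ["ROCM_PATH"] = '/opt/rocm'
os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0'
os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030'
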
diff --git a/webui.py b/webui.py index e534ab22..3833e96a 100644 --- a/webui.py +++ b/webui.py @@ -21,6 +21,12 @@ if "OOBABOOGA_FLAGS" in os.environ: print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':") print(CMD_FLAGS) print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n") + + +# Remove the '# ' from the following lines as needed for your AMD GPU on Linux +# os.environ["ROCM_PATH"] = '/opt/rocm' +# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0' +# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' def print_big_message(message): @@ -73,7 +79,7 @@ def install_dependencies(): print("What is your GPU") print() print("A) NVIDIA") - print("B) AMD") + print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.4.2/5.4.3 on Linux)") print("C) Apple M Series") print("D) None (I want to run in CPU mode)") print() @@ -85,12 +91,15 @@ def install_dependencies(): # Install the version of PyTorch needed if gpuchoice == "a": run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) - elif gpuchoice == "b": - print("AMD GPUs are not supported. Exiting...") - sys.exit() - elif gpuchoice == "c": + elif gpuchoice == "b" and not sys.platform.startswith("darwin"): + if sys.platform.startswith("linux"): + run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True) + else: + print("AMD GPUs are only supported on Linux. Exiting...") + sys.exit() + elif (gpuchoice == "c" or gpuchoice == "b") and sys.platform.startswith("darwin"): run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) - elif gpuchoice == "d": + elif gpuchoice == "d" or gpuchoice == "c": if sys.platform.startswith("linux"): run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True) else: @@ -142,24 +151,16 @@ def update_dependencies(): torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0] - # Check for '+cu' in version string to determine if torch uses CUDA or not check for pytorch-cuda as well for backwards compatibility - if '+cu' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: + # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm check for pytorch-cuda as well for backwards compatibility + if '+cu' not in torver and '+rocm' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: return # Get GPU CUDA/compute support - nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" - compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) - - # Finds the path to your dependencies - for sitedir in site.getsitepackages(): - if "site-packages" in sitedir: - site_packages_path = sitedir - break - - # This path is critical to 
installing the following dependencies - if site_packages_path is None: - print("Could not find the path to your Python packages. Exiting...") - sys.exit() + if '+cu' in torver: + nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" + compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) + else: + compute_array = type('obj', (object,), {'stdout': b'', 'returncode': 1}) # Fix a bitsandbytes compatibility issue with Linux # if sys.platform.startswith("linux"): @@ -177,6 +178,14 @@ def update_dependencies(): os.chdir("exllama") run_cmd("git pull", environment=True) os.chdir("..") + + # Pre-installed exllama module does not support AMD GPU + if '+rocm' in torver: + run_cmd("python -m pip uninstall -y exllama", environment=True) + # Get download URL for latest exllama ROCm wheel + exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm: + run_cmd("python -m pip install " + exllama_rocm, environment=True) # Fix build issue with exllama in Linux/WSL if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): @@ -189,15 +198,13 @@ def update_dependencies(): # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization if not os.path.exists("GPTQ-for-LLaMa/"): # Install oobabooga fork if min compute met or if failed to check - if gptq_min_compute_check or compute_array.returncode != 0: + if '+rocm' in torver: + run_cmd("git clone https://github.com/WapaMario63/GPTQ-for-LLaMa-ROCm.git GPTQ-for-LLaMa -b rocm", assert_success=True, environment=True) + elif gptq_min_compute_check or compute_array.returncode != 0: run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) else: run_cmd("git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) - # Install GPTQ-for-LLaMa dependencies - os.chdir("GPTQ-for-LLaMa") - run_cmd("git pull", assert_success=True, environment=True) - # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa if sys.platform.startswith("linux"): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) @@ -205,8 +212,36 @@ def update_dependencies(): # Install the correct version of g++ run_cmd("conda install -y -k gxx_linux-64=11.2.0 -c conda-forge", environment=True) + # Install/Update ROCm AutoGPTQ for AMD GPUs + if '+rocm' in torver: + if run_cmd("[ -d ./AutoGPTQ-rocm ] && rm -rfd ./AutoGPTQ-rocm; git clone https://github.com/jllllll/AutoGPTQ.git ./AutoGPTQ-rocm -b rocm && cp ./AutoGPTQ-rocm/setup_rocm.py ./AutoGPTQ-rocm/setup.py && python -m pip install ./AutoGPTQ-rocm --force-reinstall --no-deps", environment=True).returncode != 0: + print_big_message("WARNING: AutoGPTQ kernel compilation failed!\n The installer will proceed to install a pre-compiled wheel.") + if run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/ROCm-5.4.2/auto_gptq-0.3.2%2Brocm5.4.2-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps", environment=True).returncode != 0: + print_big_message("ERROR: AutoGPTQ wheel installation failed!\n You will not be able to use 
GPTQ-based models with AutoGPTQ.") + + # Install GPTQ-for-LLaMa dependencies + os.chdir("GPTQ-for-LLaMa") + run_cmd("git pull", environment=True) + + # Finds the path to your dependencies + for sitedir in site.getsitepackages(): + if "site-packages" in sitedir: + site_packages_path = sitedir + break + + # This path is critical to installing the following dependencies + if site_packages_path is None: + print("Could not find the path to your Python packages. Exiting...") + sys.exit() + # Compile and install GPTQ-for-LLaMa - if os.path.exists('setup_cuda.py'): + if '+rocm' in torver: + if os.path.exists('setup_rocm.py'): + os.replace("setup_rocm.py", "setup.py") + # Skip compile for AMD GPU if wheel is successfully installed + if rocm_gptq: + return + elif os.path.exists('setup_cuda.py'): os.rename("setup_cuda.py", "setup.py") build_gptq = run_cmd("python -m pip install .", environment=True).returncode == 0 @@ -214,18 +249,21 @@ def update_dependencies(): # Wheel installation can fail while in the build directory of a package with the same name os.chdir("..") - # If the path does not exist, then the install failed + # If the path does not exist or if command returncode is not 0, then the install failed or was potentially installed outside env quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") quant_cuda_path = glob.glob(quant_cuda_path_regex) if not build_gptq: # Attempt installation via alternative, Windows/Linux-specific method - if (sys.platform.startswith("win") or sys.platform.startswith("linux")) and not quant_cuda_path: + if sys.platform.startswith("win") or sys.platform.startswith("linux") and not quant_cuda_path: print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.") - wheel = f"{'' if gptq_min_compute_check or compute_array.returncode != 0 else '832e220d6dbf11bec5eaa8b221a52c1c854d2a25/'}quant_cuda-0.0.0-cp310-cp310-{'linux_x86_64' if sys.platform.startswith('linux') else 'win_amd64'}.whl" + if '+rocm' in torver: + wheel = 'ROCm-5.4.2/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl' + else: + wheel = f"{'' if gptq_min_compute_check or compute_array.returncode != 0 else '832e220d6dbf11bec5eaa8b221a52c1c854d2a25/'}quant_cuda-0.0.0-cp310-cp310-{'linux_x86_64' if sys.platform.startswith('linux') else 'win_amd64'}.whl" url = f"https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/{'Linux-x64' if sys.platform.startswith('linux') else 'main'}/" + wheel result = run_cmd("python -m pip install " + url, environment=True) - if result.returncode == 0: + if result.returncode == 0 and glob.glob(quant_cuda_path_regex): print("Wheel installation success!") else: print("ERROR: GPTQ wheel installation failed. 
You will not be able to use GPTQ-based models.") @@ -233,7 +271,7 @@ def update_dependencies(): print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nquant_cuda has already been installed.") else: print("ERROR: GPTQ CUDA kernel compilation failed.") - print("You will not be able to use GPTQ-based models.") + print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.") print("Continuing with install..") From ecd92d6a4e9a7c74d2bf436571f2c7e96beb9f92 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Wed, 26 Jul 2023 20:16:36 -0500 Subject: [PATCH 098/133] Remove unused variable from ROCm GPTQ install (#107) --- webui.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/webui.py b/webui.py index 3833e96a..dd949dae 100644 --- a/webui.py +++ b/webui.py @@ -238,9 +238,6 @@ def update_dependencies(): if '+rocm' in torver: if os.path.exists('setup_rocm.py'): os.replace("setup_rocm.py", "setup.py") - # Skip compile for AMD GPU if wheel is successfully installed - if rocm_gptq: - return elif os.path.exists('setup_cuda.py'): os.rename("setup_cuda.py", "setup.py") From aca5679968b59db3d2d4bc61f3c232300f5e25c1 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Wed, 2 Aug 2023 21:39:07 -0500 Subject: [PATCH 099/133] Properly fix broken gcc_linux-64 package (#115) --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index dd949dae..46f6e4c6 100644 --- a/webui.py +++ b/webui.py @@ -210,7 +210,7 @@ def update_dependencies(): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: # Install the correct version of g++ - run_cmd("conda install -y -k gxx_linux-64=11.2.0 -c conda-forge", environment=True) + run_cmd("conda install -y -k conda-forge::gxx_linux-64=11.2.0", environment=True) # Install/Update ROCm AutoGPTQ for AMD GPUs if '+rocm' in torver: From 601fc424cd2a5bb6abed3486863345b6eb6df38f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 3 Aug 2023 14:39:46 -0300 Subject: [PATCH 100/133] Several improvements (#117) --- CMD_FLAGS.txt | 1 + INSTRUCTIONS-WSL.TXT => INSTRUCTIONS-WSL.txt | 0 INSTRUCTIONS.TXT | 39 ------------- INSTRUCTIONS.txt | 38 +++++++++++++ generate_zips.sh | 4 +- webui.py | 59 ++++++++++++-------- 6 files changed, 76 insertions(+), 65 deletions(-) create mode 100644 CMD_FLAGS.txt rename INSTRUCTIONS-WSL.TXT => INSTRUCTIONS-WSL.txt (100%) delete mode 100644 INSTRUCTIONS.TXT create mode 100644 INSTRUCTIONS.txt diff --git a/CMD_FLAGS.txt b/CMD_FLAGS.txt new file mode 100644 index 00000000..4e218112 --- /dev/null +++ b/CMD_FLAGS.txt @@ -0,0 +1 @@ +--chat \ No newline at end of file diff --git a/INSTRUCTIONS-WSL.TXT b/INSTRUCTIONS-WSL.txt similarity index 100% rename from INSTRUCTIONS-WSL.TXT rename to INSTRUCTIONS-WSL.txt diff --git a/INSTRUCTIONS.TXT b/INSTRUCTIONS.TXT deleted file mode 100644 index c93924d6..00000000 --- a/INSTRUCTIONS.TXT +++ /dev/null @@ -1,39 +0,0 @@ -Thank you for downloading oobabooga/text-generation-webui. - -# Installation - -Run the "start" script. If all goes right, it should take care of -everything for you. - -To launch the web UI in the future after it is already installed, run -the same "start" script. - -# Updating the web UI - -Run the "update" script. 
This will only install the updates, so it should -be much faster than the initial installation. - -# Adding flags like --chat, --notebook, etc - -Edit the "webui.py" script using a text editor and add the desired flags -to the CMD_FLAGS variable at the top. It should look like this: - -CMD_FLAGS = '--chat' - -For instance, to add the --api flag, change it to - -CMD_FLAGS = '--chat --api' - -# Running an interactive shell - -To run an interactive shell in the miniconda environment, run the "cmd" -script. This is useful for installing additional requirements manually. - -# Using an AMD GPU in Linux - -Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. -Some systems may also need: sudo apt-get install libstdc++-12-dev - -Edit the "webui.py" script using a text editor and un-comment and modify the -lines near the top of the script according to your setup. In particular, modify -the os.environ["ROCM_PATH"] = '/opt/rocm' line to point to your ROCm installation. diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt new file mode 100644 index 00000000..053ad4b4 --- /dev/null +++ b/INSTRUCTIONS.txt @@ -0,0 +1,38 @@ +Thank you for downloading oobabooga/text-generation-webui! + +# Installation + +Run the "start" script. It will install the web UI and all of its +dependencies inside this folder. + +To launch the web UI in the future after it is already installed, run the +"start" script again. + +# Updating the web UI + +Run the "update" script. It will install the updates only, so it should +be much faster than the initial installation. + +# Adding flags like --chat, --notebook, etc + +Open the "CMD_FLAGS.txt" file with a text editor, add your flags, and +save the file. For instance, to add the --api flag, change the file +contents to + +--chat --api + +# Running an interactive shell + +Sometimes you may need to install some additional Python package. To do +that, run the "cmd" script and type your commands inside the terminal +window that will appear. + +# Using an AMD GPU in Linux + +Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. Some systems may also +need: sudo apt-get install libstdc++-12-dev + +Edit the "webui.py" script using a text editor and un-comment and +modify the lines near the top of the script according to your setup. In +particular, modify the os.environ["ROCM_PATH"] = '/opt/rocm' line to +point to your ROCm installation. diff --git a/generate_zips.sh b/generate_zips.sh index 2f852182..cdc2cc44 100644 --- a/generate_zips.sh +++ b/generate_zips.sh @@ -1,6 +1,6 @@ mkdir oobabooga_{windows,linux,macos,wsl} for p in windows macos linux wsl; do - if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.TXT} oobabooga_$p; - else cp {*$p*\.*,webui.py,INSTRUCTIONS.TXT} oobabooga_$p; fi + if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.txt} oobabooga_$p; + else cp {*$p*\.*,webui.py,INSTRUCTIONS.txt,CMD_FLAGS.txt} oobabooga_$p; fi zip -r oobabooga_$p.zip oobabooga_$p; done diff --git a/webui.py b/webui.py index 46f6e4c6..508436c0 100644 --- a/webui.py +++ b/webui.py @@ -1,7 +1,7 @@ import argparse import glob -import re import os +import re import site import subprocess import sys @@ -9,19 +9,19 @@ import sys script_dir = os.getcwd() conda_env_path = os.path.join(script_dir, "installer_files", "env") -# Use this to set your command-line flags. 
For the full list, see: -# https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui -# Example: CMD_FLAGS = '--chat --listen' -CMD_FLAGS = '--chat' - - -# Allows users to set flags in "OOBABOOGA_FLAGS" environment variable +# Command-line flags if "OOBABOOGA_FLAGS" in os.environ: CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':") print(CMD_FLAGS) print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n") - +else: + cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") + if os.path.exists(cmd_flags_path): + CMD_FLAGS = open(cmd_flags_path, 'r').read().strip() + else: + CMD_FLAGS = '--chat' + # Remove the '# ' from the following lines as needed for your AMD GPU on Linux # os.environ["ROCM_PATH"] = '/opt/rocm' @@ -74,6 +74,11 @@ def check_env(): sys.exit() +def clear_cache(): + run_cmd("conda clean -a -y", environment=True) + run_cmd("python -m pip cache purge", environment=True) + + def install_dependencies(): # Select your GPU or, choose to run in CPU mode print("What is your GPU") @@ -81,12 +86,12 @@ def install_dependencies(): print("A) NVIDIA") print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.4.2/5.4.3 on Linux)") print("C) Apple M Series") - print("D) None (I want to run in CPU mode)") + print("D) None (I want to run models in CPU mode)") print() gpuchoice = input("Input> ").lower() if gpuchoice == "d": - print_big_message("Once the installation ends, make sure to open webui.py with a text editor\nand add the --cpu flag to CMD_FLAGS.") + print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") # Install the version of PyTorch needed if gpuchoice == "a": @@ -113,10 +118,10 @@ def install_dependencies(): run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) # Install the webui dependencies - update_dependencies() + update_dependencies(initial_installation=True) -def update_dependencies(): +def update_dependencies(initial_installation=False): os.chdir("text-generation-webui") run_cmd("git pull", assert_success=True, environment=True) @@ -135,16 +140,19 @@ def update_dependencies(): run_cmd("python -m pip uninstall -y " + package_name, environment=True) print(f"Uninstalled {package_name}") - # Installs/Updates dependencies from all requirements.txt + # Installs/Updates the project dependencies run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) - extensions = next(os.walk("extensions"))[1] - for extension in extensions: - if extension in ['superbooga']: # No wheels available for dependencies - continue - extension_req_path = os.path.join("extensions", extension, "requirements.txt") - if os.path.exists(extension_req_path): - run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) + # Installs the extensions dependencies (only on the first install) + if initial_installation: + extensions = next(os.walk("extensions"))[1] + for extension in extensions: + if extension in ['superbooga']: # No wheels available for dependencies + continue + + extension_req_path = os.path.join("extensions", extension, "requirements.txt") + if os.path.exists(extension_req_path): + run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) # The following dependencies are for CUDA, not CPU # Parse output of 'pip 
show torch' to determine torch version @@ -153,6 +161,7 @@ def update_dependencies(): # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm check for pytorch-cuda as well for backwards compatibility if '+cu' not in torver and '+rocm' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: + clear_cache() return # Get GPU CUDA/compute support @@ -178,7 +187,7 @@ def update_dependencies(): os.chdir("exllama") run_cmd("git pull", environment=True) os.chdir("..") - + # Pre-installed exllama module does not support AMD GPU if '+rocm' in torver: run_cmd("python -m pip uninstall -y exllama", environment=True) @@ -222,7 +231,7 @@ def update_dependencies(): # Install GPTQ-for-LLaMa dependencies os.chdir("GPTQ-for-LLaMa") run_cmd("git pull", environment=True) - + # Finds the path to your dependencies for sitedir in site.getsitepackages(): if "site-packages" in sitedir: @@ -272,6 +281,8 @@ def update_dependencies(): print("Continuing with install..") + clear_cache() + def download_model(): os.chdir("text-generation-webui") @@ -301,7 +312,7 @@ if __name__ == "__main__": # Check if a model has been downloaded yet if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0: - print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.") + print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.") # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist conda_path_bin = os.path.join(conda_env_path, "bin") From 9e17325207ad264daec10e7506ecee6db49ba1e2 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Sat, 5 Aug 2023 08:26:24 -0500 Subject: [PATCH 101/133] Add CMD_FLAGS.txt functionality to WSL installer (#119) --- generate_zips.sh | 2 +- wsl.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/generate_zips.sh b/generate_zips.sh index cdc2cc44..f3131745 100644 --- a/generate_zips.sh +++ b/generate_zips.sh @@ -1,6 +1,6 @@ mkdir oobabooga_{windows,linux,macos,wsl} for p in windows macos linux wsl; do - if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.txt} oobabooga_$p; + if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.txt,CMD_FLAGS.txt} oobabooga_$p; else cp {*$p*\.*,webui.py,INSTRUCTIONS.txt,CMD_FLAGS.txt} oobabooga_$p; fi zip -r oobabooga_$p.zip oobabooga_$p; done diff --git a/wsl.sh b/wsl.sh index 2d5d5405..50b8d77d 100644 --- a/wsl.sh +++ b/wsl.sh @@ -46,6 +46,7 @@ if [[ "$INSTALL_DIR" =~ " " ]]; then echo This script relies on Miniconda which # create install dir if missing and copy webui.py to install dir to maintain functionality without edit if [ ! 
-d "$INSTALL_DIR" ]; then mkdir -p "$INSTALL_DIR" || exit; fi cp -u "./webui.py" "$INSTALL_DIR" +if [ -f "./CMD_FLAGS.txt" ]; then cp -u "./CMD_FLAGS.txt" "$INSTALL_DIR"; fi # figure out whether git and conda needs to be installed if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi From fa4a948b387c9a286398df6e1a16b4a8a32b2aa5 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Wed, 9 Aug 2023 01:58:23 -0300 Subject: [PATCH 102/133] Allow users to write one flag per line in CMD_FLAGS.txt --- webui.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 508436c0..bf2c241a 100644 --- a/webui.py +++ b/webui.py @@ -18,7 +18,8 @@ if "OOBABOOGA_FLAGS" in os.environ: else: cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") if os.path.exists(cmd_flags_path): - CMD_FLAGS = open(cmd_flags_path, 'r').read().strip() + with open(cmd_flags_path, 'r') as f: + CMD_FLAGS = ' '.join(line.strip() for line in f.read().splitlines() if line.strip()) else: CMD_FLAGS = '--chat' From 28e3ce4317415a8423bd783204d14483147a25cd Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 10 Aug 2023 11:19:47 -0500 Subject: [PATCH 103/133] Simplify GPTQ-for-LLaMa installation (#122) --- webui.py | 98 ++++++++------------------------------------------------ 1 file changed, 14 insertions(+), 84 deletions(-) diff --git a/webui.py b/webui.py index bf2c241a..e065b609 100644 --- a/webui.py +++ b/webui.py @@ -1,8 +1,6 @@ import argparse import glob import os -import re -import site import subprocess import sys @@ -126,10 +124,10 @@ def update_dependencies(initial_installation=False): os.chdir("text-generation-webui") run_cmd("git pull", assert_success=True, environment=True) + textgen_requirements = open("requirements.txt").read().splitlines() + # Workaround for git+ packages not updating properly Also store requirements.txt for later use - with open("requirements.txt") as f: - textgen_requirements = f.read() - git_requirements = [req for req in textgen_requirements.splitlines() if req.startswith("git+")] + git_requirements = [req for req in textgen_requirements if req.startswith("git+")] # Loop through each "git+" requirement and uninstall it for req in git_requirements: @@ -165,17 +163,6 @@ def update_dependencies(initial_installation=False): clear_cache() return - # Get GPU CUDA/compute support - if '+cu' in torver: - nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe" - compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True) - else: - compute_array = type('obj', (object,), {'stdout': b'', 'returncode': 1}) - - # Fix a bitsandbytes compatibility issue with Linux - # if sys.platform.startswith("linux"): - # shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so")) - if not os.path.exists("repositories/"): os.mkdir("repositories") @@ -197,24 +184,10 @@ def update_dependencies(initial_installation=False): if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm: run_cmd("python -m pip install " + exllama_rocm, environment=True) - # Fix build issue with exllama in Linux/WSL + # Fix JIT compile issue with exllama in Linux/WSL if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): run_cmd(f'ln -s 
"{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) - # oobabooga fork requires min compute of 6.0 - gptq_min_compute = 60 - gptq_min_compute_check = any(int(compute) >= gptq_min_compute for compute in compute_array.stdout.decode('utf-8').split(',')) if compute_array.returncode == 0 else False - - # Install GPTQ-for-LLaMa which enables 4bit CUDA quantization - if not os.path.exists("GPTQ-for-LLaMa/"): - # Install oobabooga fork if min compute met or if failed to check - if '+rocm' in torver: - run_cmd("git clone https://github.com/WapaMario63/GPTQ-for-LLaMa-ROCm.git GPTQ-for-LLaMa -b rocm", assert_success=True, environment=True) - elif gptq_min_compute_check or compute_array.returncode != 0: - run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) - else: - run_cmd("git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True) - # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa if sys.platform.startswith("linux"): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) @@ -224,64 +197,21 @@ def update_dependencies(initial_installation=False): # Install/Update ROCm AutoGPTQ for AMD GPUs if '+rocm' in torver: - if run_cmd("[ -d ./AutoGPTQ-rocm ] && rm -rfd ./AutoGPTQ-rocm; git clone https://github.com/jllllll/AutoGPTQ.git ./AutoGPTQ-rocm -b rocm && cp ./AutoGPTQ-rocm/setup_rocm.py ./AutoGPTQ-rocm/setup.py && python -m pip install ./AutoGPTQ-rocm --force-reinstall --no-deps", environment=True).returncode != 0: - print_big_message("WARNING: AutoGPTQ kernel compilation failed!\n The installer will proceed to install a pre-compiled wheel.") - if run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/ROCm-5.4.2/auto_gptq-0.3.2%2Brocm5.4.2-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps", environment=True).returncode != 0: - print_big_message("ERROR: AutoGPTQ wheel installation failed!\n You will not be able to use GPTQ-based models with AutoGPTQ.") + auto_gptq_version = [req for req in textgen_requirements if req.startswith('https://github.com/PanQiWei/AutoGPTQ/releases/download/')][0].split('/')[7] + auto_gptq_wheel = run_cmd(f'curl -s https://api.github.com/repos/PanQiWei/AutoGPTQ/releases/tags/{auto_gptq_version} | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + if not auto_gptq_wheel and run_cmd(f"python -m pip install {auto_gptq_wheel} --force-reinstall --no-deps", environment=True).returncode != 0: + print_big_message("ERROR: AutoGPTQ wheel installation failed!\n You will not be able to use GPTQ-based models with AutoGPTQ.") - # Install GPTQ-for-LLaMa dependencies - os.chdir("GPTQ-for-LLaMa") - run_cmd("git pull", environment=True) - - # Finds the path to your dependencies - for sitedir in site.getsitepackages(): - if "site-packages" in sitedir: - site_packages_path = sitedir - break - - # This path is critical to installing the following dependencies - if site_packages_path is None: - print("Could not find the path to your Python packages. 
Exiting...") - sys.exit() - - # Compile and install GPTQ-for-LLaMa + # Install GPTQ-for-LLaMa for ROCm if '+rocm' in torver: - if os.path.exists('setup_rocm.py'): - os.replace("setup_rocm.py", "setup.py") - elif os.path.exists('setup_cuda.py'): - os.rename("setup_cuda.py", "setup.py") - - build_gptq = run_cmd("python -m pip install .", environment=True).returncode == 0 - - # Wheel installation can fail while in the build directory of a package with the same name - os.chdir("..") - - # If the path does not exist or if command returncode is not 0, then the install failed or was potentially installed outside env - quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/") - quant_cuda_path = glob.glob(quant_cuda_path_regex) - if not build_gptq: - # Attempt installation via alternative, Windows/Linux-specific method - if sys.platform.startswith("win") or sys.platform.startswith("linux") and not quant_cuda_path: - print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.") - if '+rocm' in torver: - wheel = 'ROCm-5.4.2/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl' - else: - wheel = f"{'' if gptq_min_compute_check or compute_array.returncode != 0 else '832e220d6dbf11bec5eaa8b221a52c1c854d2a25/'}quant_cuda-0.0.0-cp310-cp310-{'linux_x86_64' if sys.platform.startswith('linux') else 'win_amd64'}.whl" - url = f"https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/{'Linux-x64' if sys.platform.startswith('linux') else 'main'}/" + wheel - - result = run_cmd("python -m pip install " + url, environment=True) - if result.returncode == 0 and glob.glob(quant_cuda_path_regex): - print("Wheel installation success!") - else: - print("ERROR: GPTQ wheel installation failed. You will not be able to use GPTQ-based models.") - elif quant_cuda_path: - print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nquant_cuda has already been installed.") + gptq_wheel = run_cmd('curl -s https://api.github.com/repos/jllllll/GPTQ-for-LLaMa-CUDA/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + install_gptq = run_cmd("python -m pip install " + gptq_wheel, environment=True).returncode == 0 + if install_gptq: + print("Wheel installation success!") else: - print("ERROR: GPTQ CUDA kernel compilation failed.") + print("ERROR: GPTQ wheel installation failed.") print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.") - print("Continuing with install..") - clear_cache() From 949c92d7dfe65515e7e83318199865ff8d20f8fb Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 10 Aug 2023 14:32:40 -0300 Subject: [PATCH 104/133] Create README.md --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..1d4680e5 --- /dev/null +++ b/README.md @@ -0,0 +1,15 @@ +# One-click installers + +These are automated installers for [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui). + +The idea is to allow people to use the program without having to type commands in the terminal, thus making it more accessible. 
+ +## How it works + +The `start` scripts download miniconda, create a conda environment inside the current folder, and then install the webui using that environment. + +After the initial installation, the `update` scripts are then used to automatically pull the latest text-generation-webui code and upgrade its requirements. + +## Limitations + +* The start/update scripts themselves are not automatically updated. To update them, you have to re-download the zips listed on the [main README](https://github.com/oobabooga/text-generation-webui#one-click-installers) and overwrite your existing files. From f7ad634634a9e1c2b3307ef9e2c67f864e0fc357 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sat, 12 Aug 2023 21:13:50 -0700 Subject: [PATCH 105/133] Remove --chat flag --- CMD_FLAGS.txt | 1 - INSTRUCTIONS.txt | 4 ++-- update_linux.sh | 0 3 files changed, 2 insertions(+), 3 deletions(-) mode change 100644 => 100755 update_linux.sh diff --git a/CMD_FLAGS.txt b/CMD_FLAGS.txt index 4e218112..e69de29b 100644 --- a/CMD_FLAGS.txt +++ b/CMD_FLAGS.txt @@ -1 +0,0 @@ ---chat \ No newline at end of file diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt index 053ad4b4..6637b65a 100644 --- a/INSTRUCTIONS.txt +++ b/INSTRUCTIONS.txt @@ -13,13 +13,13 @@ To launch the web UI in the future after it is already installed, run the Run the "update" script. It will install the updates only, so it should be much faster than the initial installation. -# Adding flags like --chat, --notebook, etc +# Adding flags like --model, --api, etc Open the "CMD_FLAGS.txt" file with a text editor, add your flags, and save the file. For instance, to add the --api flag, change the file contents to ---chat --api +--api # Running an interactive shell diff --git a/update_linux.sh b/update_linux.sh old mode 100644 new mode 100755 From 689f264979d63ec3af240454e81c62a72bbe5e06 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sat, 12 Aug 2023 21:14:37 -0700 Subject: [PATCH 106/133] Fix permission --- update_linux.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 update_linux.sh diff --git a/update_linux.sh b/update_linux.sh old mode 100755 new mode 100644 From b74bf5638b3fa1b2ff1b91b4c3257983ab25a85b Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Mon, 14 Aug 2023 09:15:25 -0700 Subject: [PATCH 107/133] Install extensions dependencies before webui dependencies webui takes precedence over extensions. --- webui.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/webui.py b/webui.py index e065b609..813ab838 100644 --- a/webui.py +++ b/webui.py @@ -19,7 +19,7 @@ else: with open(cmd_flags_path, 'r') as f: CMD_FLAGS = ' '.join(line.strip() for line in f.read().splitlines() if line.strip()) else: - CMD_FLAGS = '--chat' + CMD_FLAGS = '' # Remove the '# ' from the following lines as needed for your AMD GPU on Linux @@ -88,6 +88,9 @@ def install_dependencies(): print("D) None (I want to run models in CPU mode)") print() gpuchoice = input("Input> ").lower() + while gpuchoice not in ['a', 'b', 'c', 'd']: + print("Invalid choice. 
Please try again.") + gpuchoice = input("Input> ").lower() if gpuchoice == "d": print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") @@ -109,10 +112,6 @@ def install_dependencies(): else: run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) - else: - print("Invalid choice. Exiting...") - sys.exit() - # Clone webui to our computer run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) @@ -124,6 +123,17 @@ def update_dependencies(initial_installation=False): os.chdir("text-generation-webui") run_cmd("git pull", assert_success=True, environment=True) + # Install the extensions dependencies (only on the first install) + if initial_installation: + extensions = next(os.walk("extensions"))[1] + for extension in extensions: + if extension in ['superbooga']: # No wheels available for dependencies + continue + + extension_req_path = os.path.join("extensions", extension, "requirements.txt") + if os.path.exists(extension_req_path): + run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) + textgen_requirements = open("requirements.txt").read().splitlines() # Workaround for git+ packages not updating properly Also store requirements.txt for later use @@ -142,17 +152,6 @@ def update_dependencies(initial_installation=False): # Installs/Updates the project dependencies run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) - # Installs the extensions dependencies (only on the first install) - if initial_installation: - extensions = next(os.walk("extensions"))[1] - for extension in extensions: - if extension in ['superbooga']: # No wheels available for dependencies - continue - - extension_req_path = os.path.join("extensions", extension, "requirements.txt") - if os.path.exists(extension_req_path): - run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) - # The following dependencies are for CUDA, not CPU # Parse output of 'pip show torch' to determine torch version torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) From d7c98fe715b068db983378948ec58154e8c750e0 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Tue, 22 Aug 2023 21:48:32 -0300 Subject: [PATCH 108/133] Update stale.yml --- .github/workflows/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ce603a4f..2de6d955 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -13,8 +13,8 @@ jobs: - uses: actions/stale@v5 with: stale-issue-message: "" - close-issue-message: "This issue has been closed due to inactivity for 30 days. If you believe it is still relevant, please leave a comment below." - days-before-issue-stale: 30 + close-issue-message: "This issue has been closed due to inactivity for 6 weeks. If you believe it is still relevant, please leave a comment below. You can tag a developer in your comment." 
+ days-before-issue-stale: 42 days-before-issue-close: 0 stale-issue-label: "stale" days-before-pr-stale: -1 From b04b3957f962a06ff832da496e23866556c8f39a Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:35:53 -0700 Subject: [PATCH 109/133] Move one-click-installers into the repository --- CMD_FLAGS.txt | 0 cmd_linux.sh | 19 ++++ cmd_macos.sh | 24 +++++ cmd_windows.bat | 31 ++++++ cmd_wsl.bat | 11 ++ start_linux.sh | 64 ++++++++++++ start_macos.sh | 64 ++++++++++++ start_windows.bat | 81 +++++++++++++++ start_wsl.bat | 11 ++ update_linux.sh | 26 +++++ update_macos.sh | 26 +++++ update_windows.bat | 34 ++++++ update_wsl.bat | 11 ++ webui.py | 253 +++++++++++++++++++++++++++++++++++++++++++++ wsl.sh | 90 ++++++++++++++++ 15 files changed, 745 insertions(+) create mode 100644 CMD_FLAGS.txt create mode 100755 cmd_linux.sh create mode 100755 cmd_macos.sh create mode 100755 cmd_windows.bat create mode 100755 cmd_wsl.bat create mode 100755 start_linux.sh create mode 100755 start_macos.sh create mode 100755 start_windows.bat create mode 100755 start_wsl.bat create mode 100755 update_linux.sh create mode 100755 update_macos.sh create mode 100755 update_windows.bat create mode 100755 update_wsl.bat create mode 100644 webui.py create mode 100755 wsl.sh diff --git a/CMD_FLAGS.txt b/CMD_FLAGS.txt new file mode 100644 index 00000000..e69de29b diff --git a/cmd_linux.sh b/cmd_linux.sh new file mode 100755 index 00000000..0a4ef620 --- /dev/null +++ b/cmd_linux.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate env +bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"") diff --git a/cmd_macos.sh b/cmd_macos.sh new file mode 100755 index 00000000..0cec16e9 --- /dev/null +++ b/cmd_macos.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# deactivate existing env if needed +conda deactivate 2> /dev/null + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate env +source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh +conda activate $INSTALL_ENV_DIR +exec bash --norc diff --git a/cmd_windows.bat b/cmd_windows.bat new file mode 100755 index 00000000..606ff485 --- /dev/null +++ b/cmd_windows.bat @@ -0,0 +1,31 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. 
&& goto end + +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + +@rem config +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env + +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem enter commands +cmd /k "%*" + +:end +pause diff --git a/cmd_wsl.bat b/cmd_wsl.bat new file mode 100755 index 00000000..f9f4348a --- /dev/null +++ b/cmd_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd" + +:end +pause diff --git a/start_linux.sh b/start_linux.sh new file mode 100755 index 00000000..dc37f612 --- /dev/null +++ b/start_linux.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +OS_ARCH=$(uname -m) +case "${OS_ARCH}" in + x86_64*) OS_ARCH="x86_64";; + arm64*) OS_ARCH="aarch64";; + aarch64*) OS_ARCH="aarch64";; + *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit +esac + +# config +INSTALL_DIR="$(pwd)/installer_files" +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-${OS_ARCH}.sh" +conda_exists="F" + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + mkdir -p "$INSTALL_DIR" + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." 
+ exit +fi + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env +python webui.py diff --git a/start_macos.sh b/start_macos.sh new file mode 100755 index 00000000..a813edb3 --- /dev/null +++ b/start_macos.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# M Series or Intel +OS_ARCH=$(uname -m) +case "${OS_ARCH}" in + x86_64*) OS_ARCH="x86_64";; + arm64*) OS_ARCH="arm64";; + *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit +esac + +# config +INSTALL_DIR="$(pwd)/installer_files" +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-MacOSX-${OS_ARCH}.sh" +conda_exists="F" + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + mkdir -p "$INSTALL_DIR" + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." + exit +fi + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env +python webui.py diff --git a/start_windows.bat b/start_windows.bat new file mode 100755 index 00000000..4f5b0ba5 --- /dev/null +++ b/start_windows.bat @@ -0,0 +1,81 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + +@rem Check for special characters in installation path +set "SPCHARMESSAGE="WARNING: Special characters were detected in the installation path!" 
" This can cause the installation to fail!"" +echo "%CD%"| findstr /R /C:"[!#\$%&()\*+,;<=>?@\[\]\^`{|}~]" >nul && ( + call :PrintBigMessage %SPCHARMESSAGE% +) +set SPCHARMESSAGE= + +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + +@rem config +set INSTALL_DIR=%cd%\installer_files +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env +set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Windows-x86_64.exe +set conda_exists=F + +@rem figure out whether git and conda needs to be installed +call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1 +if "%ERRORLEVEL%" EQU "0" set conda_exists=T + +@rem (if necessary) install git and conda into a contained environment +@rem download conda +if "%conda_exists%" == "F" ( + echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe + + mkdir "%INSTALL_DIR%" + call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end ) + + echo Installing Miniconda to %CONDA_ROOT_PREFIX% + start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX% + + @rem test the conda binary + echo Miniconda version: + call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end ) +) + +@rem create the installer env +if not exist "%INSTALL_ENV_DIR%" ( + echo Packages to install: %PACKAGES_TO_INSTALL% + call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end ) +) + +@rem check if conda environment was actually created +if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end ) + +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem setup installer env +call python webui.py + +@rem below are functions for the script next line skips these during normal execution +goto end + +:PrintBigMessage +echo. && echo. +echo ******************************************************************* +for %%M in (%*) do echo * %%~M +echo ******************************************************************* +echo. && echo. +exit /b + +:end +pause diff --git a/start_wsl.bat b/start_wsl.bat new file mode 100755 index 00000000..41fa572f --- /dev/null +++ b/start_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh" + +:end +pause diff --git a/update_linux.sh b/update_linux.sh new file mode 100755 index 00000000..f7be8440 --- /dev/null +++ b/update_linux.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. 
&& exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# update installer env +python webui.py --update + +echo +echo "Done!" diff --git a/update_macos.sh b/update_macos.sh new file mode 100755 index 00000000..f7be8440 --- /dev/null +++ b/update_macos.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# config +CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" +INSTALL_ENV_DIR="$(pwd)/installer_files/env" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# update installer env +python webui.py --update + +echo +echo "Done!" diff --git a/update_windows.bat b/update_windows.bat new file mode 100755 index 00000000..a44e2188 --- /dev/null +++ b/update_windows.bat @@ -0,0 +1,34 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end + +@rem fix failed install when installing to a separate drive +set TMP=%cd%\installer_files +set TEMP=%cd%\installer_files + +@rem config +set CONDA_ROOT_PREFIX=%cd%\installer_files\conda +set INSTALL_ENV_DIR=%cd%\installer_files\env + +@rem environment isolation +set PYTHONNOUSERSITE=1 +set PYTHONPATH= +set PYTHONHOME= +set "CUDA_PATH=%INSTALL_ENV_DIR%" +set "CUDA_HOME=%CUDA_PATH%" + +@rem activate installer env +call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) + +@rem update installer env +call python webui.py --update + +echo. +echo Done! 
+ +:end +pause diff --git a/update_wsl.bat b/update_wsl.bat new file mode 100755 index 00000000..36d019a8 --- /dev/null +++ b/update_wsl.bat @@ -0,0 +1,11 @@ +@echo off + +cd /D "%~dp0" + +set PATH=%PATH%;%SystemRoot%\system32 + +@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script calling wsl.sh with 'update' will run updater +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh update" + +:end +pause diff --git a/webui.py b/webui.py new file mode 100644 index 00000000..813ab838 --- /dev/null +++ b/webui.py @@ -0,0 +1,253 @@ +import argparse +import glob +import os +import subprocess +import sys + +script_dir = os.getcwd() +conda_env_path = os.path.join(script_dir, "installer_files", "env") + +# Command-line flags +if "OOBABOOGA_FLAGS" in os.environ: + CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] + print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':") + print(CMD_FLAGS) + print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n") +else: + cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") + if os.path.exists(cmd_flags_path): + with open(cmd_flags_path, 'r') as f: + CMD_FLAGS = ' '.join(line.strip() for line in f.read().splitlines() if line.strip()) + else: + CMD_FLAGS = '' + + +# Remove the '# ' from the following lines as needed for your AMD GPU on Linux +# os.environ["ROCM_PATH"] = '/opt/rocm' +# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0' +# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' + + +def print_big_message(message): + message = message.strip() + lines = message.split('\n') + print("\n\n*******************************************************************") + for line in lines: + if line.strip() != '': + print("*", line) + + print("*******************************************************************\n\n") + + +def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): + # Use the conda environment + if environment: + if sys.platform.startswith("win"): + conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") + cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd + else: + conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") + cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd + + # Run shell commands + result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) + + # Assert the command ran successfully + if assert_success and result.returncode != 0: + print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...") + sys.exit() + + return result + + +def check_env(): + # If we have access to conda, we are probably in an environment + conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0 + if not conda_exist: + print("Conda is not installed. Exiting...") + sys.exit() + + # Ensure this is a new environment and not the base environment + if os.environ["CONDA_DEFAULT_ENV"] == "base": + print("Create an environment for this project and activate it. Exiting...") + sys.exit() + + +def clear_cache(): + run_cmd("conda clean -a -y", environment=True) + run_cmd("python -m pip cache purge", environment=True) + + +def install_dependencies(): + # Select your GPU or, choose to run in CPU mode + print("What is your GPU") + print() + print("A) NVIDIA") + print("B) AMD (Linux/MacOS only. 
Requires ROCm SDK 5.4.2/5.4.3 on Linux)") + print("C) Apple M Series") + print("D) None (I want to run models in CPU mode)") + print() + gpuchoice = input("Input> ").lower() + while gpuchoice not in ['a', 'b', 'c', 'd']: + print("Invalid choice. Please try again.") + gpuchoice = input("Input> ").lower() + + if gpuchoice == "d": + print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") + + # Install the version of PyTorch needed + if gpuchoice == "a": + run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) + elif gpuchoice == "b" and not sys.platform.startswith("darwin"): + if sys.platform.startswith("linux"): + run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True) + else: + print("AMD GPUs are only supported on Linux. Exiting...") + sys.exit() + elif (gpuchoice == "c" or gpuchoice == "b") and sys.platform.startswith("darwin"): + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) + elif gpuchoice == "d" or gpuchoice == "c": + if sys.platform.startswith("linux"): + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True) + else: + run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) + + # Clone webui to our computer + run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) + + # Install the webui dependencies + update_dependencies(initial_installation=True) + + +def update_dependencies(initial_installation=False): + os.chdir("text-generation-webui") + run_cmd("git pull", assert_success=True, environment=True) + + # Install the extensions dependencies (only on the first install) + if initial_installation: + extensions = next(os.walk("extensions"))[1] + for extension in extensions: + if extension in ['superbooga']: # No wheels available for dependencies + continue + + extension_req_path = os.path.join("extensions", extension, "requirements.txt") + if os.path.exists(extension_req_path): + run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) + + textgen_requirements = open("requirements.txt").read().splitlines() + + # Workaround for git+ packages not updating properly Also store requirements.txt for later use + git_requirements = [req for req in textgen_requirements if req.startswith("git+")] + + # Loop through each "git+" requirement and uninstall it + for req in git_requirements: + # Extract the package name from the "git+" requirement + url = req.replace("git+", "") + package_name = url.split("/")[-1].split("@")[0] + + # Uninstall the package using pip + run_cmd("python -m pip uninstall -y " + package_name, environment=True) + print(f"Uninstalled {package_name}") + + # Installs/Updates the project dependencies + run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) + + # The following dependencies are for CUDA, not CPU + # Parse output of 'pip 
show torch' to determine torch version + torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) + torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0] + + # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm check for pytorch-cuda as well for backwards compatibility + if '+cu' not in torver and '+rocm' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: + clear_cache() + return + + if not os.path.exists("repositories/"): + os.mkdir("repositories") + + os.chdir("repositories") + + # Install or update exllama as needed + if not os.path.exists("exllama/"): + run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True) + else: + os.chdir("exllama") + run_cmd("git pull", environment=True) + os.chdir("..") + + # Pre-installed exllama module does not support AMD GPU + if '+rocm' in torver: + run_cmd("python -m pip uninstall -y exllama", environment=True) + # Get download URL for latest exllama ROCm wheel + exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm: + run_cmd("python -m pip install " + exllama_rocm, environment=True) + + # Fix JIT compile issue with exllama in Linux/WSL + if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): + run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) + + # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa + if sys.platform.startswith("linux"): + gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) + if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: + # Install the correct version of g++ + run_cmd("conda install -y -k conda-forge::gxx_linux-64=11.2.0", environment=True) + + # Install/Update ROCm AutoGPTQ for AMD GPUs + if '+rocm' in torver: + auto_gptq_version = [req for req in textgen_requirements if req.startswith('https://github.com/PanQiWei/AutoGPTQ/releases/download/')][0].split('/')[7] + auto_gptq_wheel = run_cmd(f'curl -s https://api.github.com/repos/PanQiWei/AutoGPTQ/releases/tags/{auto_gptq_version} | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + if not auto_gptq_wheel and run_cmd(f"python -m pip install {auto_gptq_wheel} --force-reinstall --no-deps", environment=True).returncode != 0: + print_big_message("ERROR: AutoGPTQ wheel installation failed!\n You will not be able to use GPTQ-based models with AutoGPTQ.") + + # Install GPTQ-for-LLaMa for ROCm + if '+rocm' in torver: + gptq_wheel = run_cmd('curl -s https://api.github.com/repos/jllllll/GPTQ-for-LLaMa-CUDA/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') + install_gptq = run_cmd("python -m pip install " + gptq_wheel, environment=True).returncode == 0 + if install_gptq: + print("Wheel installation success!") + else: + print("ERROR: GPTQ wheel installation 
failed.") + print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.") + + clear_cache() + + +def download_model(): + os.chdir("text-generation-webui") + run_cmd("python download-model.py", environment=True) + + +def launch_webui(): + os.chdir("text-generation-webui") + run_cmd(f"python server.py {CMD_FLAGS}", environment=True) + + +if __name__ == "__main__": + # Verifies we are in a conda environment + check_env() + + parser = argparse.ArgumentParser() + parser.add_argument('--update', action='store_true', help='Update the web UI.') + args = parser.parse_args() + + if args.update: + update_dependencies() + else: + # If webui has already been installed, skip and run + if not os.path.exists("text-generation-webui/"): + install_dependencies() + os.chdir(script_dir) + + # Check if a model has been downloaded yet + if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0: + print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.") + + # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist + conda_path_bin = os.path.join(conda_env_path, "bin") + if not os.path.exists(conda_path_bin): + os.mkdir(conda_path_bin) + + # Launch the webui + launch_webui() diff --git a/wsl.sh b/wsl.sh new file mode 100755 index 00000000..50b8d77d --- /dev/null +++ b/wsl.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# detect if build-essential is missing or broken +if ! dpkg-query -W -f'${Status}' "build-essential" 2>/dev/null | grep -q "ok installed"; then +echo "build-essential not found or broken! + +A C++ compiler is required to build needed Python packages! +To install one, run cmd_wsl.bat and enter these commands: + +sudo apt-get update +sudo apt-get install build-essential +" +read -n1 -p "Continue the installer anyway? [y,n]" EXIT_PROMPT +# only continue if user inputs 'y' else exit +if ! [[ $EXIT_PROMPT == "Y" || $EXIT_PROMPT == "y" ]]; then exit; fi +fi + +# deactivate any currently active conda env +conda deactivate 2> /dev/null + +# config unlike other scripts, can't use current directory due to file IO bug in WSL, needs to be in virtual drive +INSTALL_DIR="$HOME/text-gen-install" +CONDA_ROOT_PREFIX="$INSTALL_DIR/installer_files/conda" +INSTALL_ENV_DIR="$INSTALL_DIR/installer_files/env" +MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-x86_64.sh" +conda_exists="F" + +# environment isolation +export PYTHONNOUSERSITE=1 +unset PYTHONPATH +unset PYTHONHOME +export CUDA_PATH="$INSTALL_ENV_DIR" +export CUDA_HOME="$CUDA_PATH" + +# /usr/lib/wsl/lib needs to be added to LD_LIBRARY_PATH to fix years-old bug in WSL where GPU drivers aren't linked properly +export LD_LIBRARY_PATH="$CUDA_HOME/lib:/usr/lib/wsl/lib:$LD_LIBRARY_PATH" + +# open bash cli if called with 'wsl.sh cmd' with workarounds for existing conda +if [ "$1" == "cmd" ]; then + exec bash --init-file <(echo ". ~/.bashrc; conda deactivate 2> /dev/null; cd $INSTALL_DIR || cd $HOME; source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh; conda activate $INSTALL_ENV_DIR") + exit +fi + +if [[ "$INSTALL_DIR" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi + +# create install dir if missing and copy webui.py to install dir to maintain functionality without edit +if [ ! 
-d "$INSTALL_DIR" ]; then mkdir -p "$INSTALL_DIR" || exit; fi +cp -u "./webui.py" "$INSTALL_DIR" +if [ -f "./CMD_FLAGS.txt" ]; then cp -u "./CMD_FLAGS.txt" "$INSTALL_DIR"; fi + +# figure out whether git and conda needs to be installed +if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi + +# (if necessary) install git and conda into a contained environment +# download miniconda +if [ "$conda_exists" == "F" ]; then + echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" + + curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" + + chmod u+x "$INSTALL_DIR/miniconda_installer.sh" + bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX + + # test the conda binary + echo "Miniconda version:" + "$CONDA_ROOT_PREFIX/bin/conda" --version +fi + +cd $INSTALL_DIR + +# create the installer env +if [ ! -e "$INSTALL_ENV_DIR" ]; then + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 +fi + +# check if conda environment was actually created +if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then + echo "Conda environment is empty." + exit +fi + +# activate installer env +source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) +conda activate "$INSTALL_ENV_DIR" + +# setup installer env update env if called with 'wsl.sh update' +case "$1" in +("update") python webui.py --update;; +(*) python webui.py;; +esac From fc2b831692e80a28e778e9eadebe00801f44defe Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:42:37 -0700 Subject: [PATCH 110/133] Basic changes --- webui.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/webui.py b/webui.py index 813ab838..8707e7f7 100644 --- a/webui.py +++ b/webui.py @@ -112,16 +112,12 @@ def install_dependencies(): else: run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) - # Clone webui to our computer - run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) - # Install the webui dependencies update_dependencies(initial_installation=True) def update_dependencies(initial_installation=False): - os.chdir("text-generation-webui") - run_cmd("git pull", assert_success=True, environment=True) + # run_cmd("git pull", assert_success=True, environment=True) # TODO uncomment before merging (is there a better way?) 
# Install the extensions dependencies (only on the first install) if initial_installation: @@ -215,12 +211,10 @@ def update_dependencies(initial_installation=False): def download_model(): - os.chdir("text-generation-webui") run_cmd("python download-model.py", environment=True) def launch_webui(): - os.chdir("text-generation-webui") run_cmd(f"python server.py {CMD_FLAGS}", environment=True) @@ -236,7 +230,8 @@ if __name__ == "__main__": update_dependencies() else: # If webui has already been installed, skip and run - if not os.path.exists("text-generation-webui/"): + # if not os.path.exists("text-generation-webui/"): + if True: # TODO implement a new installation check install_dependencies() os.chdir(script_dir) From 193fe18c8c10448f75d995ebf622215c9923aaaf Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:45:11 -0700 Subject: [PATCH 111/133] Resolve conflicts --- .gitignore | 3 --- README.md | 18 ------------------ webui.py | 23 ----------------------- 3 files changed, 44 deletions(-) diff --git a/.gitignore b/.gitignore index 76c8e03f..f66d828c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -<<<<<<< HEAD cache characters training/datasets @@ -34,6 +33,4 @@ models/config-user.yaml .DS_Store Thumbs.db -======= installer_files/ ->>>>>>> second-repo/main diff --git a/README.md b/README.md index acff9c2f..c3bfe137 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -<<<<<<< HEAD # Text generation web UI A Gradio web UI for Large Language Models. @@ -391,20 +390,3 @@ If you would like to contribute to the project, check out the [Contributing guid ## Acknowledgment In August 2023, [Andreessen Horowitz](https://a16z.com/) (a16z) provided a generous grant to encourage and support my independent work on this project. I am **extremely** grateful for their trust and recognition, which will allow me to dedicate more time towards realizing the full potential of text-generation-webui. -======= -# One-click installers - -These are automated installers for [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui). - -The idea is to allow people to use the program without having to type commands in the terminal, thus making it more accessible. - -## How it works - -The `start` scripts download miniconda, create a conda environment inside the current folder, and then install the webui using that environment. - -After the initial installation, the `update` scripts are then used to automatically pull the latest text-generation-webui code and upgrade its requirements. - -## Limitations - -* The start/update scripts themselves are not automatically updated. To update them, you have to re-download the zips listed on the [main README](https://github.com/oobabooga/text-generation-webui#one-click-installers) and overwrite your existing files. 
->>>>>>> second-repo/main diff --git a/webui.py b/webui.py index 083272ed..8707e7f7 100644 --- a/webui.py +++ b/webui.py @@ -112,23 +112,12 @@ def install_dependencies(): else: run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) -<<<<<<< HEAD -======= - # Clone webui to our computer - run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True) - ->>>>>>> second-repo/main # Install the webui dependencies update_dependencies(initial_installation=True) def update_dependencies(initial_installation=False): -<<<<<<< HEAD # run_cmd("git pull", assert_success=True, environment=True) # TODO uncomment before merging (is there a better way?) -======= - os.chdir("text-generation-webui") - run_cmd("git pull", assert_success=True, environment=True) ->>>>>>> second-repo/main # Install the extensions dependencies (only on the first install) if initial_installation: @@ -222,18 +211,10 @@ def update_dependencies(initial_installation=False): def download_model(): -<<<<<<< HEAD -======= - os.chdir("text-generation-webui") ->>>>>>> second-repo/main run_cmd("python download-model.py", environment=True) def launch_webui(): -<<<<<<< HEAD -======= - os.chdir("text-generation-webui") ->>>>>>> second-repo/main run_cmd(f"python server.py {CMD_FLAGS}", environment=True) @@ -249,12 +230,8 @@ if __name__ == "__main__": update_dependencies() else: # If webui has already been installed, skip and run -<<<<<<< HEAD # if not os.path.exists("text-generation-webui/"): if True: # TODO implement a new installation check -======= - if not os.path.exists("text-generation-webui/"): ->>>>>>> second-repo/main install_dependencies() os.chdir(script_dir) From 6bbfc40d10a7d2e671558a2f928285b6744c4a3c Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:51:58 -0500 Subject: [PATCH 112/133] Add .git creation to installer --- webui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 8707e7f7..f30c59a0 100644 --- a/webui.py +++ b/webui.py @@ -117,7 +117,12 @@ def install_dependencies(): def update_dependencies(initial_installation=False): - # run_cmd("git pull", assert_success=True, environment=True) # TODO uncomment before merging (is there a better way?) + # Create .git directory if missing + if not os.path.isdir(os.path.join(script_dir, ".git")): + git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git remote set-head origin -a && git reset origin/HEAD && git branch --set-upstream-to=origin/HEAD' + run_cmd(git_creation_cmd, environment=True, assert_success=True) + + run_cmd("git pull", assert_success=True, environment=True) # TODO is there a better way? 
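For illustration, the bootstrap performed by git_creation_cmd above can be sketched as a standalone Python helper. The helper and its argument names are made up here; the installer itself runs the same six git commands as a single shell string through run_cmd:

import os
import subprocess

def ensure_git_repo(repo_dir, remote_url="https://github.com/oobabooga/text-generation-webui"):
    # Turn an already-extracted folder into a checkout of the upstream
    # repository without re-downloading its files (same idea as git_creation_cmd).
    if os.path.isdir(os.path.join(repo_dir, ".git")):
        return
    for cmd in (
        ["git", "init", "-b", "main"],
        ["git", "remote", "add", "origin", remote_url],
        ["git", "fetch"],
        ["git", "remote", "set-head", "origin", "-a"],
        ["git", "reset", "origin/HEAD"],
        ["git", "branch", "--set-upstream-to=origin/HEAD"],
    ):
        subprocess.run(cmd, cwd=repo_dir, check=True)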
# Install the extensions dependencies (only on the first install) if initial_installation: From cd1049ededa4dc694c360b53801d12fdcaa0bb3d Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 21 Sep 2023 21:52:29 -0500 Subject: [PATCH 113/133] Add Conda env deactivation to installer scripts Avoids conflicts with existing Conda installations --- cmd_linux.sh | 3 +++ cmd_macos.sh | 4 ++-- cmd_windows.bat | 3 +++ start_linux.sh | 3 +++ start_macos.sh | 3 +++ start_windows.bat | 3 +++ update_linux.sh | 3 +++ update_macos.sh | 3 +++ update_windows.bat | 3 +++ wsl.sh | 4 ++-- 10 files changed, 28 insertions(+), 4 deletions(-) diff --git a/cmd_linux.sh b/cmd_linux.sh index 0a4ef620..1685050a 100755 --- a/cmd_linux.sh +++ b/cmd_linux.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null + # config CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" diff --git a/cmd_macos.sh b/cmd_macos.sh index 0cec16e9..1b052e5c 100755 --- a/cmd_macos.sh +++ b/cmd_macos.sh @@ -4,8 +4,8 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi -# deactivate existing env if needed -conda deactivate 2> /dev/null +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null # config CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" diff --git a/cmd_windows.bat b/cmd_windows.bat index 606ff485..b219cf66 100755 --- a/cmd_windows.bat +++ b/cmd_windows.bat @@ -10,6 +10,9 @@ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which c set TMP=%cd%\installer_files set TEMP=%cd%\installer_files +@rem deactivate existing conda envs as needed to avoid conflicts +(conda deactivate && conda deactivate && conda deactivate) 2>null + @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env diff --git a/start_linux.sh b/start_linux.sh index dc37f612..1de8ccdf 100755 --- a/start_linux.sh +++ b/start_linux.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null + OS_ARCH=$(uname -m) case "${OS_ARCH}" in x86_64*) OS_ARCH="x86_64";; diff --git a/start_macos.sh b/start_macos.sh index a813edb3..131a8af5 100755 --- a/start_macos.sh +++ b/start_macos.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. 
&& exit; fi +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null + # M Series or Intel OS_ARCH=$(uname -m) case "${OS_ARCH}" in diff --git a/start_windows.bat b/start_windows.bat index 4f5b0ba5..3140f70f 100755 --- a/start_windows.bat +++ b/start_windows.bat @@ -17,6 +17,9 @@ set SPCHARMESSAGE= set TMP=%cd%\installer_files set TEMP=%cd%\installer_files +@rem deactivate existing conda envs as needed to avoid conflicts +(conda deactivate && conda deactivate && conda deactivate) 2>null + @rem config set INSTALL_DIR=%cd%\installer_files set CONDA_ROOT_PREFIX=%cd%\installer_files\conda diff --git a/update_linux.sh b/update_linux.sh index f7be8440..e641fafb 100755 --- a/update_linux.sh +++ b/update_linux.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null + # config CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" diff --git a/update_macos.sh b/update_macos.sh index f7be8440..e641fafb 100755 --- a/update_macos.sh +++ b/update_macos.sh @@ -4,6 +4,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")" if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null + # config CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" INSTALL_ENV_DIR="$(pwd)/installer_files/env" diff --git a/update_windows.bat b/update_windows.bat index a44e2188..6f01ee03 100755 --- a/update_windows.bat +++ b/update_windows.bat @@ -10,6 +10,9 @@ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which c set TMP=%cd%\installer_files set TEMP=%cd%\installer_files +@rem deactivate existing conda envs as needed to avoid conflicts +(conda deactivate && conda deactivate && conda deactivate) 2>null + @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda set INSTALL_ENV_DIR=%cd%\installer_files\env diff --git a/wsl.sh b/wsl.sh index 50b8d77d..73a20a86 100755 --- a/wsl.sh +++ b/wsl.sh @@ -15,8 +15,8 @@ read -n1 -p "Continue the installer anyway? [y,n]" EXIT_PROMPT if ! 
[[ $EXIT_PROMPT == "Y" || $EXIT_PROMPT == "y" ]]; then exit; fi fi -# deactivate any currently active conda env -conda deactivate 2> /dev/null +# deactivate existing conda envs as needed to avoid conflicts +{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null # config unlike other scripts, can't use current directory due to file IO bug in WSL, needs to be in virtual drive INSTALL_DIR="$HOME/text-gen-install" From 498552a92bf9b40eaea0fdae028ce1c940a157cc Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 21 Sep 2023 22:12:16 -0500 Subject: [PATCH 114/133] More robust installation check for installer --- webui.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index f30c59a0..0fcdaabd 100644 --- a/webui.py +++ b/webui.py @@ -2,6 +2,7 @@ import argparse import glob import os import subprocess +import site import sys script_dir = os.getcwd() @@ -77,6 +78,16 @@ def clear_cache(): run_cmd("conda clean -a -y", environment=True) run_cmd("python -m pip cache purge", environment=True) +def is_installed(): + for sitedir in site.getsitepackages(): + if "site-packages" in sitedir and conda_env_path in sitedir: + site_packages_path = sitedir + break + + if site_packages_path: + return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py')) + else: + return os.path.isdir(conda_env_path) def install_dependencies(): # Select your GPU or, choose to run in CPU mode @@ -235,8 +246,7 @@ if __name__ == "__main__": update_dependencies() else: # If webui has already been installed, skip and run - # if not os.path.exists("text-generation-webui/"): - if True: # TODO implement a new installation check + if not is_installed(): install_dependencies() os.chdir(script_dir) From 9054c98ecac8c7b2d40c84262e92cfe6cd6a1e5a Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:00:33 -0500 Subject: [PATCH 115/133] Use --autostash on git pull --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 0fcdaabd..19847879 100644 --- a/webui.py +++ b/webui.py @@ -133,7 +133,7 @@ def update_dependencies(initial_installation=False): git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git remote set-head origin -a && git reset origin/HEAD && git branch --set-upstream-to=origin/HEAD' run_cmd(git_creation_cmd, environment=True, assert_success=True) - run_cmd("git pull", assert_success=True, environment=True) # TODO is there a better way? + run_cmd("git pull --autostash", assert_success=True, environment=True) # TODO is there a better way? # Install the extensions dependencies (only on the first install) if initial_installation: From 060bb76aa077fc03c0278eb9b77005239f12be19 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Fri, 22 Sep 2023 01:10:30 -0500 Subject: [PATCH 116/133] Update WSL installer --- wsl.sh | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/wsl.sh b/wsl.sh index 73a20a86..6660bc2e 100755 --- a/wsl.sh +++ b/wsl.sh @@ -19,7 +19,11 @@ fi { conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null # config unlike other scripts, can't use current directory due to file IO bug in WSL, needs to be in virtual drive -INSTALL_DIR="$HOME/text-gen-install" +INSTALL_DIR_PREFIX="$HOME/text-gen-install" +if [[ ! 
$(realpath "$(pwd)/..") = /mnt/* ]]; then + INSTALL_DIR_PREFIX="$(realpath "$(pwd)/..")" && INSTALL_INPLACE=1 +fi +INSTALL_DIR="$INSTALL_DIR_PREFIX/text-generation-webui" CONDA_ROOT_PREFIX="$INSTALL_DIR/installer_files/conda" INSTALL_ENV_DIR="$INSTALL_DIR/installer_files/env" MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-Linux-x86_64.sh" @@ -66,11 +70,9 @@ if [ "$conda_exists" == "F" ]; then "$CONDA_ROOT_PREFIX/bin/conda" --version fi -cd $INSTALL_DIR - # create the installer env if [ ! -e "$INSTALL_ENV_DIR" ]; then - "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 + "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 git fi # check if conda environment was actually created @@ -83,6 +85,24 @@ fi source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" +# copy webui.py and CMD_FLAGS.txt to install dir to allow edits within Windows +if [[ $INSTALL_INPLACE != 1 ]]; then + cp -u "./webui.py" "$INSTALL_DIR" + if [ -f "./CMD_FLAGS.txt" ]; then cp -u "./CMD_FLAGS.txt" "$INSTALL_DIR"; fi +fi + +cd $INSTALL_DIR + +if [ ! -f "./server.py" ]; then + git init -b main + git remote add origin https://github.com/oobabooga/text-generation-webui + git fetch + git remote set-head origin -a + git reset origin/HEAD --hard + git branch --set-upstream-to=origin/HEAD + git restore -- . :!./webui.py :!./CMD_FLAGS.txt +fi + # setup installer env update env if called with 'wsl.sh update' case "$1" in ("update") python webui.py --update;; From 69b0aedd95d96cb0f1a0f7869e2b8a00df9aeaf8 Mon Sep 17 00:00:00 2001 From: jllllll <3887729+jllllll@users.noreply.github.com> Date: Fri, 22 Sep 2023 01:12:08 -0500 Subject: [PATCH 117/133] Fix missing models warning --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 19847879..d5071c80 100644 --- a/webui.py +++ b/webui.py @@ -251,7 +251,7 @@ if __name__ == "__main__": os.chdir(script_dir) # Check if a model has been downloaded yet - if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0: + if len([item for item in glob.glob('models/*') if not item.endswith(('.txt', '.yaml'))]) == 0: print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.") # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist From 66363a4d703b46ee6bcd991f262127a7ea90aec5 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:02:21 -0700 Subject: [PATCH 118/133] Minor changes / reorder some functions --- webui.py | 112 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 63 insertions(+), 49 deletions(-) diff --git a/webui.py b/webui.py index d5071c80..aaa77885 100644 --- a/webui.py +++ b/webui.py @@ -8,6 +8,11 @@ import sys script_dir = os.getcwd() conda_env_path = os.path.join(script_dir, "installer_files", "env") +# Remove the '# ' from the following lines as needed for your AMD GPU on Linux +# os.environ["ROCM_PATH"] = '/opt/rocm' +# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0' +# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' + # Command-line flags if "OOBABOOGA_FLAGS" in os.environ: CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] @@ -23,42 +28,28 @@ else: CMD_FLAGS = '' -# Remove 
the '# ' from the following lines as needed for your AMD GPU on Linux -# os.environ["ROCM_PATH"] = '/opt/rocm' -# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0' -# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' +def is_linux(): + return sys.platform.startswith("linux") -def print_big_message(message): - message = message.strip() - lines = message.split('\n') - print("\n\n*******************************************************************") - for line in lines: - if line.strip() != '': - print("*", line) - - print("*******************************************************************\n\n") +def is_windows(): + return sys.platform.startswith("win") -def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): - # Use the conda environment - if environment: - if sys.platform.startswith("win"): - conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") - cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd - else: - conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") - cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd +def is_macos(): + return sys.platform.startswith("darwin") - # Run shell commands - result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) - # Assert the command ran successfully - if assert_success and result.returncode != 0: - print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...") - sys.exit() +def is_installed(): + for sitedir in site.getsitepackages(): + if "site-packages" in sitedir and conda_env_path in sitedir: + site_packages_path = sitedir + break - return result + if site_packages_path: + return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py')) + else: + return os.path.isdir(conda_env_path) def check_env(): @@ -78,19 +69,41 @@ def clear_cache(): run_cmd("conda clean -a -y", environment=True) run_cmd("python -m pip cache purge", environment=True) -def is_installed(): - for sitedir in site.getsitepackages(): - if "site-packages" in sitedir and conda_env_path in sitedir: - site_packages_path = sitedir - break - if site_packages_path: - return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py')) - else: - return os.path.isdir(conda_env_path) +def print_big_message(message): + message = message.strip() + lines = message.split('\n') + print("\n\n*******************************************************************") + for line in lines: + if line.strip() != '': + print("*", line) + + print("*******************************************************************\n\n") + + +def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): + # Use the conda environment + if environment: + if is_windows(): + conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") + cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd + else: + conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") + cmd = ". 
\"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd + + # Run shell commands + result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) + + # Assert the command ran successfully + if assert_success and result.returncode != 0: + print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...") + sys.exit() + + return result + def install_dependencies(): - # Select your GPU or, choose to run in CPU mode + print("What is your GPU") print() print("A) NVIDIA") @@ -98,27 +111,28 @@ def install_dependencies(): print("C) Apple M Series") print("D) None (I want to run models in CPU mode)") print() + + # Select your GPU, or choose to run in CPU mode gpuchoice = input("Input> ").lower() while gpuchoice not in ['a', 'b', 'c', 'd']: print("Invalid choice. Please try again.") gpuchoice = input("Input> ").lower() - if gpuchoice == "d": print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") - # Install the version of PyTorch needed + # Install Pytorch if gpuchoice == "a": run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) - elif gpuchoice == "b" and not sys.platform.startswith("darwin"): - if sys.platform.startswith("linux"): + elif gpuchoice == "b" and not is_macos(): + if is_linux(): run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True) else: print("AMD GPUs are only supported on Linux. Exiting...") sys.exit() - elif (gpuchoice == "c" or gpuchoice == "b") and sys.platform.startswith("darwin"): + elif (gpuchoice == "c" or gpuchoice == "b") and is_macos(): run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) elif gpuchoice == "d" or gpuchoice == "c": - if sys.platform.startswith("linux"): + if is_linux(): run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True) else: run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) @@ -132,8 +146,8 @@ def update_dependencies(initial_installation=False): if not os.path.isdir(os.path.join(script_dir, ".git")): git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git remote set-head origin -a && git reset origin/HEAD && git branch --set-upstream-to=origin/HEAD' run_cmd(git_creation_cmd, environment=True, assert_success=True) - - run_cmd("git pull --autostash", assert_success=True, environment=True) # TODO is there a better way? 
+ + run_cmd("git pull --autostash", assert_success=True, environment=True) # Install the extensions dependencies (only on the first install) if initial_installation: @@ -196,11 +210,11 @@ def update_dependencies(initial_installation=False): run_cmd("python -m pip install " + exllama_rocm, environment=True) # Fix JIT compile issue with exllama in Linux/WSL - if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"): + if is_linux() and not os.path.exists(f"{conda_env_path}/lib64"): run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa - if sys.platform.startswith("linux"): + if is_linux(): gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: # Install the correct version of g++ From 86648d4085bec7b0aa3ebc0e1e5ced9b4f0c1782 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 08:13:11 -0700 Subject: [PATCH 119/133] Remove CUDA, keep only pytorch --- CMD_FLAGS.txt | 1 + webui.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CMD_FLAGS.txt b/CMD_FLAGS.txt index e69de29b..47fe8405 100644 --- a/CMD_FLAGS.txt +++ b/CMD_FLAGS.txt @@ -0,0 +1 @@ +--listen diff --git a/webui.py b/webui.py index aaa77885..4225cd7e 100644 --- a/webui.py +++ b/webui.py @@ -122,7 +122,7 @@ def install_dependencies(): # Install Pytorch if gpuchoice == "a": - run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) + run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) elif gpuchoice == "b" and not is_macos(): if is_linux(): run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True) From 8ab3eca9ecf15f9de28aea845e93e0f674769450 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 09:35:19 -0700 Subject: [PATCH 120/133] Add a warning for outdated installations --- modules/one_click_installer_check.py | 8 ++++++++ server.py | 1 + 2 files changed, 9 insertions(+) create mode 100644 modules/one_click_installer_check.py diff --git a/modules/one_click_installer_check.py b/modules/one_click_installer_check.py new file mode 100644 index 00000000..29c0c546 --- /dev/null +++ b/modules/one_click_installer_check.py @@ -0,0 +1,8 @@ +from pathlib import Path +from modules.logging_colors import logger + +if Path('../webui.py').exists(): + logger.warning('\nIt looks like you are running an outdated version of ' + 'the one-click-installers.\n' + 'Please migrate your installation following the instructions here:\n' + 'https://') diff --git a/server.py b/server.py index fc99ef72..6f7b03ec 100644 --- a/server.py +++ b/server.py @@ -1,6 +1,7 @@ import os import warnings +import modules.one_click_installer_check from modules.block_requests import OpenMonkeyPatch, RequestBlocker from modules.logging_colors import logger From 
3314b7d7952734ca45c6c87aad40806e9760cf27 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:03:56 -0700 Subject: [PATCH 121/133] Allow start scripts to have command-line flags --- CMD_FLAGS.txt | 1 - start_linux.sh | 2 +- start_macos.sh | 2 +- start_windows.bat | 2 +- start_wsl.bat | 2 +- webui.py | 5 +++-- wsl.sh | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CMD_FLAGS.txt b/CMD_FLAGS.txt index 47fe8405..e69de29b 100644 --- a/CMD_FLAGS.txt +++ b/CMD_FLAGS.txt @@ -1 +0,0 @@ ---listen diff --git a/start_linux.sh b/start_linux.sh index 1de8ccdf..6792902a 100755 --- a/start_linux.sh +++ b/start_linux.sh @@ -64,4 +64,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # setup installer env -python webui.py +python webui.py $@ diff --git a/start_macos.sh b/start_macos.sh index 131a8af5..5f5bb439 100755 --- a/start_macos.sh +++ b/start_macos.sh @@ -64,4 +64,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # setup installer env -python webui.py +python webui.py $@ diff --git a/start_windows.bat b/start_windows.bat index 3140f70f..77ba93bf 100755 --- a/start_windows.bat +++ b/start_windows.bat @@ -67,7 +67,7 @@ set "CUDA_HOME=%CUDA_PATH%" call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) @rem setup installer env -call python webui.py +call python webui.py %* @rem below are functions for the script next line skips these during normal execution goto end diff --git a/start_wsl.bat b/start_wsl.bat index 41fa572f..26d2f8a6 100755 --- a/start_wsl.bat +++ b/start_wsl.bat @@ -5,7 +5,7 @@ cd /D "%~dp0" set PATH=%PATH%;%SystemRoot%\system32 @rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script -call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh" +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh $@" :end pause diff --git a/webui.py b/webui.py index 4225cd7e..9d2fbaf1 100644 --- a/webui.py +++ b/webui.py @@ -245,7 +245,8 @@ def download_model(): def launch_webui(): - run_cmd(f"python server.py {CMD_FLAGS}", environment=True) + flags = [flag for flag in sys.argv[1:] if flag != '--update'] + run_cmd(f"python server.py {' '.join(flags)} {CMD_FLAGS}", environment=True) if __name__ == "__main__": @@ -254,7 +255,7 @@ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--update', action='store_true', help='Update the web UI.') - args = parser.parse_args() + args, _ = parser.parse_known_args() if args.update: update_dependencies() diff --git a/wsl.sh b/wsl.sh index 6660bc2e..6b62adca 100755 --- a/wsl.sh +++ b/wsl.sh @@ -106,5 +106,5 @@ fi # setup installer env update env if called with 'wsl.sh update' case "$1" in ("update") python webui.py --update;; -(*) python webui.py;; +(*) python webui.py $@;; esac From 2d2a8cfb48f1d8f48c40328d4f3e30db748afc94 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:08:08 -0700 Subject: [PATCH 122/133] Remove a file --- generate_zips.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 generate_zips.sh diff --git a/generate_zips.sh b/generate_zips.sh deleted file mode 100644 index f3131745..00000000 --- a/generate_zips.sh +++ /dev/null @@ -1,6 +0,0 @@ -mkdir oobabooga_{windows,linux,macos,wsl} -for p in windows 
macos linux wsl; do - if [ "$p" == "wsl" ]; then cp {*$p*\.*,webui.py,INSTRUCTIONS-WSL.txt,CMD_FLAGS.txt} oobabooga_$p; - else cp {*$p*\.*,webui.py,INSTRUCTIONS.txt,CMD_FLAGS.txt} oobabooga_$p; fi - zip -r oobabooga_$p.zip oobabooga_$p; -done From b4b5f455584cc8519e78ad6717725a952a7f6ce6 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:28:22 -0700 Subject: [PATCH 123/133] Join the installation instructions --- INSTRUCTIONS.txt | 38 ----------- .../new/Installation Instructions.md | 68 ++++++++++++------- 2 files changed, 43 insertions(+), 63 deletions(-) delete mode 100644 INSTRUCTIONS.txt rename INSTRUCTIONS-WSL.txt => docs/new/Installation Instructions.md (55%) diff --git a/INSTRUCTIONS.txt b/INSTRUCTIONS.txt deleted file mode 100644 index 6637b65a..00000000 --- a/INSTRUCTIONS.txt +++ /dev/null @@ -1,38 +0,0 @@ -Thank you for downloading oobabooga/text-generation-webui! - -# Installation - -Run the "start" script. It will install the web UI and all of its -dependencies inside this folder. - -To launch the web UI in the future after it is already installed, run the -"start" script again. - -# Updating the web UI - -Run the "update" script. It will install the updates only, so it should -be much faster than the initial installation. - -# Adding flags like --model, --api, etc - -Open the "CMD_FLAGS.txt" file with a text editor, add your flags, and -save the file. For instance, to add the --api flag, change the file -contents to - ---api - -# Running an interactive shell - -Sometimes you may need to install some additional Python package. To do -that, run the "cmd" script and type your commands inside the terminal -window that will appear. - -# Using an AMD GPU in Linux - -Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. Some systems may also -need: sudo apt-get install libstdc++-12-dev - -Edit the "webui.py" script using a text editor and un-comment and -modify the lines near the top of the script according to your setup. In -particular, modify the os.environ["ROCM_PATH"] = '/opt/rocm' line to -point to your ROCm installation. diff --git a/INSTRUCTIONS-WSL.txt b/docs/new/Installation Instructions.md similarity index 55% rename from INSTRUCTIONS-WSL.txt rename to docs/new/Installation Instructions.md index b2dabdd3..cd43a5f8 100644 --- a/INSTRUCTIONS-WSL.txt +++ b/docs/new/Installation Instructions.md @@ -1,6 +1,41 @@ -Thank you for downloading oobabooga/text-generation-webui. +# Installation -# WSL setup +Run the "start" script. It will install the web UI and all of its +dependencies inside this folder. + +To launch the web UI in the future after it is already installed, run the +"start" script again. + +## Updating the web UI + +Run the "update" script. It will install the updates only, so it should +be much faster than the initial installation. + +## Adding flags like --model, --api, etc + +Open the "CMD_FLAGS.txt" file with a text editor, add your flags, and +save the file. For instance, to add the --api flag, change the file +contents to + +--api + +## Running an interactive shell + +Sometimes you may need to install some additional Python package. To do +that, run the "cmd" script and type your commands inside the terminal +window that will appear. + +## Using an AMD GPU in Linux + +Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. 
Some systems may also +need: sudo apt-get install libstdc++-12-dev + +Edit the "webui.py" script using a text editor and un-comment and +modify the lines near the top of the script according to your setup. In +particular, modify the os.environ["ROCM_PATH"] = '/opt/rocm' line to +point to your ROCm installation. + +## WSL special instructions If you do not have WSL installed, see here: https://learn.microsoft.com/en-us/windows/wsl/install @@ -28,7 +63,7 @@ Do this by using these commands: wsl -l wsl -s -# Web UI Installation +### Web UI Installation Run the "start" script. By default it will install the web UI in WSL: /home/{username}/text-gen-install @@ -36,35 +71,18 @@ Run the "start" script. By default it will install the web UI in WSL: To launch the web UI in the future after it is already installed, run the same "start" script. Ensure that webui.py and wsl.sh are next to it! -# Updating the web UI +### Updating the web UI -Run the "update" script. This will only install the updates, so it should -be much faster than the initial installation. +As an alternative to running the "update" script, you can also run "wsl.sh update" in WSL. -You can also run "wsl.sh update" in WSL. - -# Adding flags like --chat, --notebook, etc - -Edit the "webui.py" script using a text editor and add the desired flags -to the CMD_FLAGS variable at the top. It should look like this: - -CMD_FLAGS = '--chat' - -For instance, to add the --api flag, change it to - -CMD_FLAGS = '--chat --api' - -The "start" and "update" scripts will copy the edited "webui.py" to WSL -to be used by the web UI. - -# Running an interactive shell +### Running an interactive shell To run an interactive shell in the miniconda environment, run the "cmd" script. This is useful for installing additional requirements manually. -You can also run "wsl.sh cmd" in WSL. +As an alternative to running the "cmd" script, you can also run "wsl.sh cmd" in WSL. 
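For reference, the AMD-related lines referred to in the instructions above look like this once un-commented near the top of the launcher script. This is only a sketch: the GFX override values shown suit some RDNA2 cards, and other GPUs need different values.

import os

os.environ["ROCM_PATH"] = '/opt/rocm'              # point at the local ROCm installation
os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0'  # example value, GPU-dependent
os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030'        # example value, GPU-dependent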
-# Changing the default install location +### Changing the default install location To change this, you will need to edit the scripts as follows: wsl.sh: line ~22 INSTALL_DIR="/path/to/install/dir" From c74326de023a5f5b4005684a05cd87116efce8bc Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:37:22 -0700 Subject: [PATCH 124/133] Fixes by @jllllll --- cmd_windows.bat | 2 +- start_windows.bat | 2 +- start_wsl.bat | 2 +- update_windows.bat | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd_windows.bat b/cmd_windows.bat index b219cf66..82e1dac9 100755 --- a/cmd_windows.bat +++ b/cmd_windows.bat @@ -11,7 +11,7 @@ set TMP=%cd%\installer_files set TEMP=%cd%\installer_files @rem deactivate existing conda envs as needed to avoid conflicts -(conda deactivate && conda deactivate && conda deactivate) 2>null +(conda deactivate && conda deactivate && conda deactivate) 2>nul @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda diff --git a/start_windows.bat b/start_windows.bat index 77ba93bf..c75708ab 100755 --- a/start_windows.bat +++ b/start_windows.bat @@ -18,7 +18,7 @@ set TMP=%cd%\installer_files set TEMP=%cd%\installer_files @rem deactivate existing conda envs as needed to avoid conflicts -(conda deactivate && conda deactivate && conda deactivate) 2>null +(conda deactivate && conda deactivate && conda deactivate) 2>nul @rem config set INSTALL_DIR=%cd%\installer_files diff --git a/start_wsl.bat b/start_wsl.bat index 26d2f8a6..d7bacead 100755 --- a/start_wsl.bat +++ b/start_wsl.bat @@ -5,7 +5,7 @@ cd /D "%~dp0" set PATH=%PATH%;%SystemRoot%\system32 @rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script -call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh $@" +call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh %*" :end pause diff --git a/update_windows.bat b/update_windows.bat index 6f01ee03..944ec32b 100755 --- a/update_windows.bat +++ b/update_windows.bat @@ -11,7 +11,7 @@ set TMP=%cd%\installer_files set TEMP=%cd%\installer_files @rem deactivate existing conda envs as needed to avoid conflicts -(conda deactivate && conda deactivate && conda deactivate) 2>null +(conda deactivate && conda deactivate && conda deactivate) 2>nul @rem config set CONDA_ROOT_PREFIX=%cd%\installer_files\conda From ccfc9190163c658d26c4f69b0350d29b10c974d0 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:51:21 -0700 Subject: [PATCH 125/133] Make webui.py more readable --- webui.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/webui.py b/webui.py index 9d2fbaf1..60484e44 100644 --- a/webui.py +++ b/webui.py @@ -113,29 +113,30 @@ def install_dependencies(): print() # Select your GPU, or choose to run in CPU mode - gpuchoice = input("Input> ").lower() - while gpuchoice not in ['a', 'b', 'c', 'd']: + choice = input("Input> ").upper() + while choice not in ['A', 'B', 'C', 'D']: print("Invalid choice. 
Please try again.") - gpuchoice = input("Input> ").lower() - if gpuchoice == "d": + choice = input("Input> ").upper() + if choice == "D": print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") - # Install Pytorch - if gpuchoice == "a": - run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True) - elif gpuchoice == "b" and not is_macos(): + # Find the proper Pytorch installation command + install_git = "conda install -y -k ninja git" + install_pytorch = "python -m pip install torch torchvision torchaudio" + + if choice == "A": + install_pytorch = "python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117" + elif is_macos() and choice == "B": if is_linux(): - run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True) + install_pytorch = "python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2" else: print("AMD GPUs are only supported on Linux. Exiting...") sys.exit() - elif (gpuchoice == "c" or gpuchoice == "b") and is_macos(): - run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) - elif gpuchoice == "d" or gpuchoice == "c": - if is_linux(): - run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True) - else: - run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True) + elif is_linux() and (choice == "C" or choice == "D"): + install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu" + + # Install Git and then Pytorch + run_cmd(f"{install_git} && {install_pytorch}", assert_success=True, environment=True) # Install the webui dependencies update_dependencies(initial_installation=True) From 967dda17a0cc760dccf0b61e359a471c29124b7c Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:52:52 -0700 Subject: [PATCH 126/133] Remove OOBABOOGA_FLAGS --- webui.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/webui.py b/webui.py index 60484e44..3aa4a3e9 100644 --- a/webui.py +++ b/webui.py @@ -14,18 +14,12 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") # os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' # Command-line flags -if "OOBABOOGA_FLAGS" in os.environ: - CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"] - print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':") - print(CMD_FLAGS) - print("To use the CMD_FLAGS Inside webui.py, unset 'OOBABOOGA_FLAGS'.\n") +cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") +if os.path.exists(cmd_flags_path): + with open(cmd_flags_path, 'r') as f: + CMD_FLAGS = ' '.join(line.strip() for line in f.read().splitlines() if line.strip()) else: - cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") - if os.path.exists(cmd_flags_path): - with open(cmd_flags_path, 'r') as f: - CMD_FLAGS = ' 
'.join(line.strip() for line in f.read().splitlines() if line.strip()) - else: - CMD_FLAGS = '' + CMD_FLAGS = '' def is_linux(): From fe2acdf45fddffe2c96f604814ef60b52bc86ab6 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 15:52:20 -0300 Subject: [PATCH 127/133] Update README.md --- README.md | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c3bfe137..b90d04cd 100644 --- a/README.md +++ b/README.md @@ -30,15 +30,28 @@ To learn how to use the various features, check out the Documentation: https://g ### One-click installers -| Windows | Linux | macOS | WSL | -|--------|--------|--------|--------| -| [oobabooga-windows.zip](https://github.com/oobabooga/text-generation-webui/releases/download/installers/oobabooga_windows.zip) | [oobabooga-linux.zip](https://github.com/oobabooga/text-generation-webui/releases/download/installers/oobabooga_linux.zip) |[oobabooga-macos.zip](https://github.com/oobabooga/text-generation-webui/releases/download/installers/oobabooga_macos.zip) | [oobabooga-wsl.zip](https://github.com/oobabooga/text-generation-webui/releases/download/installers/oobabooga_wsl.zip) | +1) Run the `start_linux.sh`, `start_windows.bat`, `start_macos.sh`, or `start_wsl.bat` script depending on your OS. +2) Select your GPU vendor when asked. +3) Have fun! -Just download the zip above, extract it, and double-click on "start". The web UI and all its dependencies will be installed in the same folder. +#### More information -* The source codes and more information can be found here: https://github.com/oobabooga/one-click-installers -* There is no need to run the installers as admin. -* Huge thanks to [@jllllll](https://github.com/jllllll), [@ClayShoaf](https://github.com/ClayShoaf), and [@xNul](https://github.com/xNul) for their contributions to these installers. +The script will create a folder called `installer_files` where it will download Miniconda, create a Conda environment, and install the webui requirements. The installation is self-contained: if you want to reinstall, just delete `installer_files` and run the start script again. + +The same `start` script should be used to launch the webui in the future after it is already installed. + +To update, run `update_linux.sh`, `update_windows.bat`, `update_macos.sh`, or `update_wsl.bat`. + +To run commands in the `installer_files` environment, run the appropriate cmd script: `cmd_linux.sh`, `cmd_windows.bat`, `cmd_macos.sh`, or `cmd_wsl.bat`. + +To define persistent command-line flags like `--listen` or `--api`, edit the `CMD_FLAGS.txt` file with a text editor and add them there. Flags can also be provided directly to the start scripts, for instance, `./start-linux.sh --listen`. + +Some observations: + +* There is no need to run any of those scripts as admin/root. +* If you need to move your installation folder, you will need to delete `installer_files` and install again, as some links will be broken. So choose a good location for your `text-generation-webui` folder before installing. +* Additional instructions for WSL and AMD users can be found here: [Instructions](https://github.com/oobabooga/text-generation-webui/blob/one-click/docs/new/Installation%20Instructions.md). +* The installer has been tested mostly on NVIDIA GPUs. If you can find a way to improve it for your AMD/Intel Arc/Mac Metal GPU, you are highly encouraged to submit a PR to this repository. 
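A minimal, self-contained illustration of how the persistent flags reach the server command line under these changes; the file contents shown are only an example:

# CMD_FLAGS.txt might contain, one flag (or group of flags) per line:
#   --listen
#   --api
with open("CMD_FLAGS.txt") as f:
    CMD_FLAGS = " ".join(line.strip() for line in f if line.strip())

print(f"python server.py {CMD_FLAGS}")  # -> python server.py --listen --api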
### Manual installation using Conda From 6c5f81f002dba4da623e64f89efc218d28d61185 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 12:00:06 -0700 Subject: [PATCH 128/133] Rename webui.py to one_click.py --- README.md | 2 +- webui.py => one_click.py | 0 start_linux.sh | 2 +- start_macos.sh | 2 +- start_windows.bat | 2 +- update_linux.sh | 2 +- update_macos.sh | 2 +- update_windows.bat | 2 +- wsl.sh | 14 +++++++------- 9 files changed, 14 insertions(+), 14 deletions(-) rename webui.py => one_click.py (100%) diff --git a/README.md b/README.md index b90d04cd..a65c170f 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Some observations: * There is no need to run any of those scripts as admin/root. * If you need to move your installation folder, you will need to delete `installer_files` and install again, as some links will be broken. So choose a good location for your `text-generation-webui` folder before installing. * Additional instructions for WSL and AMD users can be found here: [Instructions](https://github.com/oobabooga/text-generation-webui/blob/one-click/docs/new/Installation%20Instructions.md). -* The installer has been tested mostly on NVIDIA GPUs. If you can find a way to improve it for your AMD/Intel Arc/Mac Metal GPU, you are highly encouraged to submit a PR to this repository. +* The installer has been tested mostly on NVIDIA GPUs. If you can find a way to improve it for your AMD/Intel Arc/Mac Metal GPU, you are highly encouraged to submit a PR to this repository. The main file to be edited is `one_click.py`. ### Manual installation using Conda diff --git a/webui.py b/one_click.py similarity index 100% rename from webui.py rename to one_click.py diff --git a/start_linux.sh b/start_linux.sh index 6792902a..d9d2ab07 100755 --- a/start_linux.sh +++ b/start_linux.sh @@ -64,4 +64,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # setup installer env -python webui.py $@ +python one_click.py $@ diff --git a/start_macos.sh b/start_macos.sh index 5f5bb439..7fa82d81 100755 --- a/start_macos.sh +++ b/start_macos.sh @@ -64,4 +64,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # setup installer env -python webui.py $@ +python one_click.py $@ diff --git a/start_windows.bat b/start_windows.bat index c75708ab..da8050f0 100755 --- a/start_windows.bat +++ b/start_windows.bat @@ -67,7 +67,7 @@ set "CUDA_HOME=%CUDA_PATH%" call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) @rem setup installer env -call python webui.py %* +call python one_click.py %* @rem below are functions for the script next line skips these during normal execution goto end diff --git a/update_linux.sh b/update_linux.sh index e641fafb..28686b0f 100755 --- a/update_linux.sh +++ b/update_linux.sh @@ -23,7 +23,7 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # update installer env -python webui.py --update +python one_click.py --update echo echo "Done!" 
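Since the start scripts now pass their arguments straight through to one_click.py, the launcher relies on argparse's parse_known_args() to consume --update while leaving any other flags for server.py. A small sketch of that behaviour, with example flag values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--update', action='store_true', help='Update the web UI.')

# e.g. invoked as: ./start_linux.sh --listen --api
args, unknown = parser.parse_known_args(['--listen', '--api'])

assert args.update is False
print(unknown)  # ['--listen', '--api'] -- forwarded to "python server.py ..."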
diff --git a/update_macos.sh b/update_macos.sh index e641fafb..28686b0f 100755 --- a/update_macos.sh +++ b/update_macos.sh @@ -23,7 +23,7 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a conda activate "$INSTALL_ENV_DIR" # update installer env -python webui.py --update +python one_click.py --update echo echo "Done!" diff --git a/update_windows.bat b/update_windows.bat index 944ec32b..b08773cf 100755 --- a/update_windows.bat +++ b/update_windows.bat @@ -28,7 +28,7 @@ set "CUDA_HOME=%CUDA_PATH%" call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end ) @rem update installer env -call python webui.py --update +call python one_click.py --update echo. echo Done! diff --git a/wsl.sh b/wsl.sh index 6b62adca..4ff72c04 100755 --- a/wsl.sh +++ b/wsl.sh @@ -47,9 +47,9 @@ fi if [[ "$INSTALL_DIR" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi -# create install dir if missing and copy webui.py to install dir to maintain functionality without edit +# create install dir if missing and copy one_click.py to install dir to maintain functionality without edit if [ ! -d "$INSTALL_DIR" ]; then mkdir -p "$INSTALL_DIR" || exit; fi -cp -u "./webui.py" "$INSTALL_DIR" +cp -u "./one_click.py" "$INSTALL_DIR" if [ -f "./CMD_FLAGS.txt" ]; then cp -u "./CMD_FLAGS.txt" "$INSTALL_DIR"; fi # figure out whether git and conda needs to be installed @@ -85,9 +85,9 @@ fi source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) conda activate "$INSTALL_ENV_DIR" -# copy webui.py and CMD_FLAGS.txt to install dir to allow edits within Windows +# copy one_click.py and CMD_FLAGS.txt to install dir to allow edits within Windows if [[ $INSTALL_INPLACE != 1 ]]; then - cp -u "./webui.py" "$INSTALL_DIR" + cp -u "./one_click.py" "$INSTALL_DIR" if [ -f "./CMD_FLAGS.txt" ]; then cp -u "./CMD_FLAGS.txt" "$INSTALL_DIR"; fi fi @@ -100,11 +100,11 @@ if [ ! -f "./server.py" ]; then git remote set-head origin -a git reset origin/HEAD --hard git branch --set-upstream-to=origin/HEAD - git restore -- . :!./webui.py :!./CMD_FLAGS.txt + git restore -- . 
:!./one_click.py :!./CMD_FLAGS.txt fi # setup installer env update env if called with 'wsl.sh update' case "$1" in -("update") python webui.py --update;; -(*) python webui.py $@;; +("update") python one_click.py --update;; +(*) python one_click.py $@;; esac From 0fee18e8b7867c9a33032149019709b37eccfaed Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 12:08:05 -0700 Subject: [PATCH 129/133] Rename some functions --- one_click.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/one_click.py b/one_click.py index 3aa4a3e9..c73ab042 100644 --- a/one_click.py +++ b/one_click.py @@ -96,7 +96,7 @@ def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, return result -def install_dependencies(): +def install_webui(): print("What is your GPU") print() @@ -132,11 +132,12 @@ def install_dependencies(): # Install Git and then Pytorch run_cmd(f"{install_git} && {install_pytorch}", assert_success=True, environment=True) - # Install the webui dependencies - update_dependencies(initial_installation=True) + # Install the webui requirements + update_requirements(initial_installation=True) -def update_dependencies(initial_installation=False): +def update_requirements(initial_installation=False): + # Create .git directory if missing if not os.path.isdir(os.path.join(script_dir, ".git")): git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git remote set-head origin -a && git reset origin/HEAD && git branch --set-upstream-to=origin/HEAD' @@ -144,11 +145,11 @@ def update_dependencies(initial_installation=False): run_cmd("git pull --autostash", assert_success=True, environment=True) - # Install the extensions dependencies (only on the first install) + # Initial installation only: install the extensions requirements if initial_installation: extensions = next(os.walk("extensions"))[1] for extension in extensions: - if extension in ['superbooga']: # No wheels available for dependencies + if extension in ['superbooga']: # No wheels available for requirements continue extension_req_path = os.path.join("extensions", extension, "requirements.txt") @@ -157,7 +158,7 @@ def update_dependencies(initial_installation=False): textgen_requirements = open("requirements.txt").read().splitlines() - # Workaround for git+ packages not updating properly Also store requirements.txt for later use + # Workaround for git+ packages not updating properly. 
Also store requirements.txt for later use git_requirements = [req for req in textgen_requirements if req.startswith("git+")] # Loop through each "git+" requirement and uninstall it @@ -170,15 +171,15 @@ def update_dependencies(initial_installation=False): run_cmd("python -m pip uninstall -y " + package_name, environment=True) print(f"Uninstalled {package_name}") - # Installs/Updates the project dependencies + # Install/update the project requirements run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True) - # The following dependencies are for CUDA, not CPU + # The following requirements are for CUDA, not CPU # Parse output of 'pip show torch' to determine torch version torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True) torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0] - # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm check for pytorch-cuda as well for backwards compatibility + # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility if '+cu' not in torver and '+rocm' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: clear_cache() return @@ -188,7 +189,7 @@ def update_dependencies(initial_installation=False): os.chdir("repositories") - # Install or update exllama as needed + # Install or update ExLlama as needed if not os.path.exists("exllama/"): run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True) else: @@ -196,15 +197,15 @@ def update_dependencies(initial_installation=False): run_cmd("git pull", environment=True) os.chdir("..") - # Pre-installed exllama module does not support AMD GPU + # Pre-installed ExLlama module does not support AMD GPU if '+rocm' in torver: run_cmd("python -m pip uninstall -y exllama", environment=True) - # Get download URL for latest exllama ROCm wheel + # Get download URL for latest ExLlama ROCm wheel exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8') if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm: run_cmd("python -m pip install " + exllama_rocm, environment=True) - # Fix JIT compile issue with exllama in Linux/WSL + # Fix JIT compile issue with ExLlama in Linux/WSL if is_linux() and not os.path.exists(f"{conda_env_path}/lib64"): run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) @@ -253,11 +254,11 @@ if __name__ == "__main__": args, _ = parser.parse_known_args() if args.update: - update_dependencies() + update_requirements() else: # If webui has already been installed, skip and run if not is_installed(): - install_dependencies() + install_webui() os.chdir(script_dir) # Check if a model has been downloaded yet From c33a94e381efc36b8b7916f56967cc32bb85c7b5 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 12:16:31 -0700 Subject: [PATCH 130/133] Rename doc file --- README.md | 2 +- ...nstructions.md => One-Click-Installers.md} | 36 +++---------------- 2 files changed, 5 insertions(+), 33 deletions(-) rename docs/{new/Installation Instructions.md => 
One-Click-Installers.md} (66%) diff --git a/README.md b/README.md index a65c170f..ec70b9fe 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ Some observations: * There is no need to run any of those scripts as admin/root. * If you need to move your installation folder, you will need to delete `installer_files` and install again, as some links will be broken. So choose a good location for your `text-generation-webui` folder before installing. -* Additional instructions for WSL and AMD users can be found here: [Instructions](https://github.com/oobabooga/text-generation-webui/blob/one-click/docs/new/Installation%20Instructions.md). +* Additional instructions for WSL and AMD can be found here: [Instructions](https://github.com/oobabooga/text-generation-webui/blob/one-click/docs/One-Click-Installers.md). * The installer has been tested mostly on NVIDIA GPUs. If you can find a way to improve it for your AMD/Intel Arc/Mac Metal GPU, you are highly encouraged to submit a PR to this repository. The main file to be edited is `one_click.py`. ### Manual installation using Conda diff --git a/docs/new/Installation Instructions.md b/docs/One-Click-Installers.md similarity index 66% rename from docs/new/Installation Instructions.md rename to docs/One-Click-Installers.md index cd43a5f8..56db9a12 100644 --- a/docs/new/Installation Instructions.md +++ b/docs/One-Click-Installers.md @@ -1,41 +1,16 @@ -# Installation - -Run the "start" script. It will install the web UI and all of its -dependencies inside this folder. - -To launch the web UI in the future after it is already installed, run the -"start" script again. - -## Updating the web UI - -Run the "update" script. It will install the updates only, so it should -be much faster than the initial installation. - -## Adding flags like --model, --api, etc - -Open the "CMD_FLAGS.txt" file with a text editor, add your flags, and -save the file. For instance, to add the --api flag, change the file -contents to - ---api - -## Running an interactive shell - -Sometimes you may need to install some additional Python package. To do -that, run the "cmd" script and type your commands inside the terminal -window that will appear. +# Additional one-click installers info ## Using an AMD GPU in Linux Requires ROCm SDK 5.4.2 or 5.4.3 to be installed. Some systems may also need: sudo apt-get install libstdc++-12-dev -Edit the "webui.py" script using a text editor and un-comment and +Edit the "one_click.py" script using a text editor and un-comment and modify the lines near the top of the script according to your setup. In particular, modify the os.environ["ROCM_PATH"] = '/opt/rocm' line to point to your ROCm installation. -## WSL special instructions +## WSL instructions If you do not have WSL installed, see here: https://learn.microsoft.com/en-us/windows/wsl/install @@ -69,7 +44,7 @@ Run the "start" script. By default it will install the web UI in WSL: /home/{username}/text-gen-install To launch the web UI in the future after it is already installed, run -the same "start" script. Ensure that webui.py and wsl.sh are next to it! +the same "start" script. Ensure that one_click.py and wsl.sh are next to it! ### Updating the web UI @@ -77,9 +52,6 @@ As an alternative to running the "update" script, you can also run "wsl.sh updat ### Running an interactive shell -To run an interactive shell in the miniconda environment, run the "cmd" -script. This is useful for installing additional requirements manually. 
- As an alternative to running the "cmd" script, you can also run "wsl.sh cmd" in WSL. ### Changing the default install location From 589ee9f62356178c94e7281bfc9e3c9a8d965e22 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 16:21:48 -0300 Subject: [PATCH 131/133] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ec70b9fe..c8a387da 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,9 @@ To learn how to use the various features, check out the Documentation: https://g #### More information -The script will create a folder called `installer_files` where it will download Miniconda, create a Conda environment, and install the webui requirements. The installation is self-contained: if you want to reinstall, just delete `installer_files` and run the start script again. +The script will create a folder called `installer_files` where it will create a Conda environment using Miniconda. The installation is self-contained: if you want to reinstall, just delete `installer_files` and run the start script again. -The same `start` script should be used to launch the webui in the future after it is already installed. +To launch the webui in the future after it is already installed, run the same `start` script. To update, run `update_linux.sh`, `update_windows.bat`, `update_macos.sh`, or `update_wsl.bat`. From c075969875ee95667690d6be7c02f7d8c4206db4 Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Fri, 22 Sep 2023 13:10:03 -0700 Subject: [PATCH 132/133] Add instructions --- modules/one_click_installer_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/one_click_installer_check.py b/modules/one_click_installer_check.py index 29c0c546..1a7dd2b9 100644 --- a/modules/one_click_installer_check.py +++ b/modules/one_click_installer_check.py @@ -5,4 +5,4 @@ if Path('../webui.py').exists(): logger.warning('\nIt looks like you are running an outdated version of ' 'the one-click-installers.\n' 'Please migrate your installation following the instructions here:\n' - 'https://') + 'https://github.com/oobabooga/text-generation-webui/wiki/Migrating-an-old-one%E2%80%90click-install') From 7f0ea4dc16651d23bb97c3795b02f0f14eee2de4 Mon Sep 17 00:00:00 2001 From: deevis Date: Fri, 22 Sep 2023 14:22:24 -0600 Subject: [PATCH 133/133] feature: allow comments in CMD_FLAGS.txt (#127) --------- Co-authored-by: missionfloyd --- one_click.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/one_click.py b/one_click.py index c73ab042..0717bbf6 100644 --- a/one_click.py +++ b/one_click.py @@ -17,7 +17,7 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env") cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") if os.path.exists(cmd_flags_path): with open(cmd_flags_path, 'r') as f: - CMD_FLAGS = ' '.join(line.strip() for line in f.read().splitlines() if line.strip()) + CMD_FLAGS = ' '.join(line.strip() for line in f if line.strip() and not line.strip().startswith('#')) else: CMD_FLAGS = ''
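The final patch makes `CMD_FLAGS.txt` tolerant of comments and blank lines by filtering them out while joining the remaining flags into a single string. Below is a self-contained sketch of that filtering; the sample file contents and flag names are illustrative only.

```python
# Demonstrates the comment/blank-line filtering added for CMD_FLAGS.txt.
# The sample flags below are illustrative, not a recommended configuration.
import os
import tempfile

sample = """\
# Flags passed to server.py on every start
--api

# --listen   (disabled for now)
--chat
"""

with tempfile.TemporaryDirectory() as tmp:
    cmd_flags_path = os.path.join(tmp, "CMD_FLAGS.txt")
    with open(cmd_flags_path, "w") as f:
        f.write(sample)

    if os.path.exists(cmd_flags_path):
        with open(cmd_flags_path, "r") as f:
            CMD_FLAGS = " ".join(
                line.strip()
                for line in f
                if line.strip() and not line.strip().startswith("#")
            )
    else:
        CMD_FLAGS = ""

    print(CMD_FLAGS)  # -> "--api --chat"
```

Lines that are empty or start with `#` are dropped, so users can annotate or temporarily disable flags without breaking the launcher.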