Bump PyTorch to CUDA 11.8 (#4209)

oobabooga 2023-10-07 00:23:49 -03:00 committed by GitHub
parent 06fff3b2e9
commit d33facc9fe
5 changed files with 65 additions and 40 deletions

README.md

@@ -78,7 +78,7 @@ bash Miniconda3.sh
#### 1. Create a new conda environment
```
-conda create -n textgen python=3.10.9
+conda create -n textgen python=3.10
conda activate textgen
```
@@ -86,11 +86,11 @@ conda activate textgen
| System | GPU | Command |
|--------|---------|---------|
-| Linux/WSL | NVIDIA | `pip3 install torch torchvision torchaudio` |
+| Linux/WSL | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118` |
| Linux/WSL | CPU only | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu` |
| Linux | AMD | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2` |
| MacOS + MPS | Any | `pip3 install torch torchvision torchaudio` |
-| Windows | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117` |
+| Windows | NVIDIA | `pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118` |
| Windows | CPU only | `pip3 install torch torchvision torchaudio` |
The up-to-date commands can be found here: https://pytorch.org/get-started/locally/.
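
A quick way to confirm that the intended build landed after running one of the commands above (an illustrative check, not part of this commit):

```python
# Illustrative sanity check: confirm which PyTorch build is installed.
import torch

print(torch.__version__)          # e.g. "2.1.0+cu118" for the CUDA 11.8 build
print(torch.version.cuda)         # e.g. "11.8"; None on a CPU-only build
print(torch.cuda.is_available())  # True only with a working NVIDIA driver
```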

extensions/whisper_stt/requirements.txt

@@ -1,4 +1,4 @@
SpeechRecognition==3.10.0
-openai-whisper
+# openai-whisper
soundfile
ffmpeg
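
With the line commented out, pip treats it as a comment and skips it when the extension's requirements are installed; users who want local Whisper transcription presumably need to install it themselves (e.g. `pip3 install openai-whisper`).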

one_click.py

@@ -1,5 +1,6 @@
import argparse
import glob
+import hashlib
import os
import platform
import re
@@ -112,6 +113,15 @@ def print_big_message(message):
    print("*******************************************************************\n\n")


+def calculate_file_hash(file_path):
+    p = os.path.join(script_dir, file_path)
+    if os.path.isfile(p):
+        with open(p, 'rb') as f:
+            return hashlib.sha256(f.read()).hexdigest()
+    else:
+        return ''


def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    # Use the conda environment
    if environment:
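
The new calculate_file_hash() returns a hex SHA-256 digest, or '' when the file is missing, and reads each file into memory in one go, which is fine for these small launcher scripts. For reference, a sketch of the equivalent incremental pattern that keeps memory bounded on large files (not part of the commit):

```python
import hashlib

def file_sha256(path, chunk_size=1 << 16):
    # Same digest as hashing f.read() all at once, but with bounded memory use.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
```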
@@ -161,8 +171,8 @@ def install_webui():
    install_git = "conda install -y -k ninja git"
    install_pytorch = "python -m pip install torch torchvision torchaudio"

-    if is_windows() and choice == "A":
-        install_pytorch = "python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117"
+    if any((is_windows(), is_linux())) and choice == "A":
+        install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"
    elif not is_macos() and choice == "B":
        if is_linux():
            install_pytorch = "python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2"
@@ -187,7 +197,21 @@ def update_requirements(initial_installation=False):
        git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git remote set-head origin -a && git reset origin/HEAD && git branch --set-upstream-to=origin/HEAD'
        run_cmd(git_creation_cmd, environment=True, assert_success=True)

+    files_to_check = [
+        'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat',
+        'update_linux.sh', 'update_macos.sh', 'update_windows.bat', 'update_wsl.bat',
+        'one_click.py'
+    ]
+
+    before_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
    run_cmd("git pull --autostash", assert_success=True, environment=True)
+    after_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
+
+    # Check for differences in installation file hashes
+    for file_name in files_to_check:
+        if before_pull_hashes[file_name] != after_pull_hashes[file_name]:
+            print(f"File '{file_name}' was updated during 'git pull'. Please run the script again.")
+            exit(1)

    # Extensions requirements are installed only during the initial install by default.
    # That can be changed with the INSTALL_EXTENSIONS environment variable.
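
This hunk makes the updater self-aware: the launcher scripts and one_click.py itself are hashed before and after git pull, and the run aborts if any of them changed, so the user re-runs the freshly pulled code instead of continuing with a stale copy in memory. A minimal standalone sketch of the pattern (the update() helper and the reduced file list are hypothetical stand-ins for the real run_cmd calls):

```python
import hashlib
import os
import subprocess
import sys

FILES_TO_CHECK = ['one_click.py']  # reduced list for illustration

def sha256_of(path):
    # Hex digest of the file, or '' if it does not exist.
    if not os.path.isfile(path):
        return ''
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

def update():
    before = {name: sha256_of(name) for name in FILES_TO_CHECK}
    subprocess.run(['git', 'pull', '--autostash'], check=True)
    after = {name: sha256_of(name) for name in FILES_TO_CHECK}

    for name in FILES_TO_CHECK:
        if before[name] != after[name]:
            # The running script was replaced; restart so the new code takes effect.
            print(f"File '{name}' was updated during 'git pull'. Please run the script again.")
            sys.exit(1)
```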
@@ -210,7 +234,8 @@ def update_requirements(initial_installation=False):
    # Detect the PyTorch version
    torver = torch_version()
-    is_cuda = '+cu' in torver  # 2.0.1+cu117
+    is_cuda = '+cu' in torver  # 2.0.1+cu118
+    is_cuda117 = '+cu117' in torver  # 2.0.1+cu117
    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
    is_cpu = '+cpu' in torver  # 2.0.1+cpu
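
The build variant rides on the PEP 440 local version suffix (the part after the `+`), so plain substring checks are enough to classify an install. A self-contained illustration with hard-coded example strings:

```python
# Classify PyTorch builds by their local version suffix (example strings).
for torver in ('2.1.0+cu118', '2.0.1+cu117', '2.0.1+rocm5.4.2', '2.0.1+cpu'):
    flags = {
        'is_cuda': '+cu' in torver,       # True for both cu117 and cu118
        'is_cuda117': '+cu117' in torver, # True only for the old CUDA 11.7 build
        'is_rocm': '+rocm' in torver,
        'is_cpu': '+cpu' in torver,
    }
    print(torver, flags)
```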
@@ -236,25 +261,25 @@ def update_requirements(initial_installation=False):
    else:
        requirements_file = "requirements_noavx2.txt"

+    # Prepare the requirements file
    print_big_message(f"Installing webui requirements from file: {requirements_file}")
    textgen_requirements = open(requirements_file).read().splitlines()
+    if is_cuda117:
+        textgen_requirements = [req.replace('+cu118', '+cu117').replace('torch2.1', 'torch2.0') for req in textgen_requirements]
+
+    with open('temp_requirements.txt', 'w') as file:
+        file.write('\n'.join(textgen_requirements))

-    # Workaround for git+ packages not updating properly. Also store requirements.txt for later use
+    # Workaround for git+ packages not updating properly.
    git_requirements = [req for req in textgen_requirements if req.startswith("git+")]

    # Loop through each "git+" requirement and uninstall it
    for req in git_requirements:
        # Extract the package name from the "git+" requirement
        url = req.replace("git+", "")
-        package_name = url.split("/")[-1].split("@")[0]
+        package_name = url.split("/")[-1].split("@")[0].rstrip(".git")

        # Uninstall the package using pip
        run_cmd("python -m pip uninstall -y " + package_name, environment=True)
        print(f"Uninstalled {package_name}")

    # Install/update the project requirements
-    run_cmd(f"python -m pip install -r {requirements_file} --upgrade", assert_success=True, environment=True)
+    run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True)
+    os.remove('temp_requirements.txt')

    # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility
    if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
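
The key move in this hunk is backwards compatibility: the requirements files now pin +cu118/torch2.1 wheels, but an existing +cu117 PyTorch install gets a rewritten temp_requirements.txt instead of a forced upgrade. A condensed sketch of that rewrite (the wheel URL is a shortened placeholder, not a real release asset):

```python
# Condensed sketch of the cu117 fallback rewrite (illustrative input only).
textgen_requirements = [
    'autoawq==0.1.4',
    'https://example.invalid/flash_attn-2.3.1.post1+cu118torch2.1-cp310.whl',  # placeholder
]

is_cuda117 = True  # what update_requirements() derives from torch_version()
if is_cuda117:
    textgen_requirements = [
        req.replace('+cu118', '+cu117').replace('torch2.1', 'torch2.0')
        for req in textgen_requirements
    ]

with open('temp_requirements.txt', 'w') as file:
    file.write('\n'.join(textgen_requirements))
# pip then installs from temp_requirements.txt, which is removed afterwards.
```

One caveat worth noting: `.rstrip(".git")` strips any trailing run of the characters `.`, `g`, `i` and `t` rather than the literal suffix, so a repository name like `audiocraft` would come out as `audiocraf`; `removesuffix('.git')` (Python 3.9+) expresses the intended operation exactly.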

requirements.txt

@@ -36,16 +36,16 @@ https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.11/llama_cpp_
https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.11/llama_cpp_python-0.2.11-cp310-cp310-win_amd64.whl; platform_system == "Windows"
# CUDA wheels
-https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu117-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.27+cu117-py3-none-any.whl
+https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.1.post1/flash_attn-2.3.1.post1+cu118torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu118-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.27+cu118-py3-none-any.whl
autoawq==0.1.4
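
Each wheel URL above carries a PEP 508 environment marker after the semicolon, so pip only selects the build matching the current platform. The packaging library (a version of which pip vendors internally) can evaluate such markers directly; a small demonstration (requires `pip install packaging`):

```python
# Evaluate the environment markers used above against the local machine.
from packaging.markers import Marker

win = Marker('platform_system == "Windows"')
linux_x86 = Marker('platform_system == "Linux" and platform_machine == "x86_64"')

print(win.evaluate())        # True on Windows, False elsewhere
print(linux_x86.evaluate())  # True on 64-bit x86 Linux, False elsewhere
```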

requirements_noavx2.txt

@@ -36,16 +36,16 @@ https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/
https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.11+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows"
# CUDA wheels
-https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu117avx-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu117avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX/ctransformers-0.2.27+cu117-py3-none-any.whl
+https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/turboderp/exllamav2/releases/download/v0.0.5/exllamav2-0.0.5+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.1.post1/flash_attn-2.3.1.post1+cu118torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu118avx-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.11+cu118avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu118-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu118-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX/ctransformers-0.2.27+cu118-py3-none-any.whl
autoawq==0.1.4
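
requirements_noavx2.txt mirrors the same cu117-to-cu118 bump; the only difference from requirements.txt is that its llama-cpp-python and ctransformers wheels target AVX rather than AVX2, for CPUs without AVX2 support.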