Merge pull request #5181 from oobabooga/dev

Merge dev branch
oobabooga 2024-01-05 18:42:30 -03:00 committed by GitHub
commit 8ea3f31601
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 81 additions and 64 deletions

----------------------------------------

@@ -18,7 +18,7 @@ with open(bias_file, "r") as f:
 params = {
     "activate": True,
     "bias string": " *I am so happy*",
-    "use custom string": False,
+    "custom string": "",
 }
@@ -44,7 +44,7 @@ def bot_prefix_modifier(string):
     behavior.
     """
     if params['activate']:
-        if params['use custom string']:
+        if params['custom string'].strip() != '':
             return f'{string} {params["custom string"].strip()} '
         else:
             return f'{string} {params["bias string"].strip()} '
@@ -56,8 +56,7 @@ def ui():
     # Gradio elements
     activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
     dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
-    use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
-    custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above')
+    custom_string = gr.Textbox(value=params['custom string'], placeholder="Enter custom bias string", label="Custom Character Bias", info='If not empty, will be used instead of the value above')

     # Event functions to update the parameters in the backend
     def update_bias_string(x):
@@ -73,11 +72,3 @@ def ui():
     dropdown_string.change(update_bias_string, dropdown_string, None)
     custom_string.change(update_custom_string, custom_string, None)
     activate.change(lambda x: params.update({"activate": x}), activate, None)
-    use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)
-
-    # Group elements together depending on the selected option
-    def bias_string_group():
-        if use_custom_string.value:
-            return gr.Group([use_custom_string, custom_string])
-        else:
-            return dropdown_string
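
Note on the change above: the "use custom string" checkbox is gone, and a non-empty custom textbox now silently takes precedence over the dropdown. A minimal, runnable sketch of the resulting precedence logic (a standalone copy of the extension's params and modifier, not part of the diff):

    # Non-empty "custom string" wins over "bias string"; empty falls back.
    params = {
        "activate": True,
        "bias string": " *I am so happy*",
        "custom string": "",
    }

    def bot_prefix_modifier(string):
        if params['activate']:
            if params['custom string'].strip() != '':
                return f'{string} {params["custom string"].strip()} '
            else:
                return f'{string} {params["bias string"].strip()} '
        return string

    print(bot_prefix_modifier("Bot:"))   # Bot: *I am so happy*
    params["custom string"] = "*I am grumpy*"
    print(bot_prefix_modifier("Bot:"))   # Bot: *I am grumpy*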

----------------------------------------

@@ -193,6 +193,7 @@ for (i = 0; i < slimDropdownElements.length; i++) {
 var buttonsInChat = document.querySelectorAll("#chat-tab:not(.old-ui) #chat-buttons button");
 var button = document.getElementById("hover-element-button");
 var menu = document.getElementById("hover-menu");
+var istouchscreen = (navigator.maxTouchPoints > 0) || "ontouchstart" in document.documentElement;

 function showMenu() {
     menu.style.display = "flex"; // Show the menu
@@ -200,7 +201,9 @@ function showMenu() {
 function hideMenu() {
     menu.style.display = "none"; // Hide the menu
-    document.querySelector("#chat-input textarea").focus();
+    if (!istouchscreen) {
+        document.querySelector("#chat-input textarea").focus(); // Focus on the chat input
+    }
 }

 if (buttonsInChat.length > 0) {
@@ -235,11 +238,18 @@ function isMouseOverButtonOrMenu() {
 }

 button.addEventListener("mouseenter", function () {
-    showMenu();
+    if (!istouchscreen) {
+        showMenu();
+    }
 });

+button.addEventListener("click", function () {
+    if (menu.style.display === "flex") {
+        hideMenu();
+    }
+    else {
+        showMenu();
+    }
+});

 // Add event listener for mouseleave on the button
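
Net behavior of the three hunks above: on touch devices (where browsers synthesize mouseenter on tap) the hover menu no longer opens on hover, and closing it no longer steals focus to the chat textarea; instead, tapping the button toggles the menu open and closed.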

----------------------------------------

@@ -560,17 +560,17 @@ def replace_character_names(text, name1, name2):
 def generate_pfp_cache(character):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()

     for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
         if path.exists():
             original_img = Image.open(path)
-            original_img.save(Path('cache/pfp_character.png'), format='PNG')
+            original_img.save(Path(f'{cache_folder}/pfp_character.png'), format='PNG')

             thumb = make_thumbnail(original_img)
-            thumb.save(Path('cache/pfp_character_thumb.png'), format='PNG')
+            thumb.save(Path(f'{cache_folder}/pfp_character_thumb.png'), format='PNG')

             return thumb
@@ -594,8 +594,9 @@ def load_character(character, name1, name2):
             file_contents = open(filepath, 'r', encoding='utf-8').read()
             data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)

+    cache_folder = Path(shared.args.disk_cache_dir)
-    for path in [Path("cache/pfp_character.png"), Path("cache/pfp_character_thumb.png")]:
+    for path in [Path(f"{cache_folder}/pfp_character.png"), Path(f"{cache_folder}/pfp_character_thumb.png")]:
         if path.exists():
             path.unlink()
@@ -713,17 +714,17 @@ def check_tavern_character(img):
 def upload_your_profile_picture(img):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()

     if img is None:
-        if Path("cache/pfp_me.png").exists():
-            Path("cache/pfp_me.png").unlink()
+        if Path(f"{cache_folder}/pfp_me.png").exists():
+            Path(f"{cache_folder}/pfp_me.png").unlink()
     else:
         img = make_thumbnail(img)
-        img.save(Path('cache/pfp_me.png'))
-        logger.info('Profile picture saved to "cache/pfp_me.png"')
+        img.save(Path(f'{cache_folder}/pfp_me.png'))
+        logger.info(f'Profile picture saved to "{cache_folder}/pfp_me.png"')

 def generate_character_yaml(name, greeting, context):
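
The three hunks above replace a hard-coded "cache" folder with shared.args.disk_cache_dir, making the whole profile-picture cache relocatable. A minimal sketch of the pattern, assuming an argparse flag spelled --disk-cache-dir with default "cache" (the flag spelling is an assumption inferred from the attribute name; only shared.args.disk_cache_dir appears in this diff):

    # Sketch: deriving a relocatable cache folder from a CLI flag.
    # The --disk-cache-dir spelling is an assumption; only the attribute
    # shared.args.disk_cache_dir is visible in the diff.
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser()
    parser.add_argument('--disk-cache-dir', type=str, default='cache')
    args = parser.parse_args([])  # or: parser.parse_args(['--disk-cache-dir', '/tmp/webui-cache'])

    cache_folder = Path(args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()  # creates ./cache by default, as in the diff

    print(Path(f'{cache_folder}/pfp_character.png'))  # cache/pfp_character.png by default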

----------------------------------------

@@ -8,6 +8,7 @@ import markdown
 from PIL import Image, ImageOps

 from modules.utils import get_available_chat_styles
+from modules import shared

 # This is to store the paths to the thumbnails of the profile pictures
 image_cache = {}
@@ -170,7 +171,7 @@ def make_thumbnail(image):
 def get_image_cache(path):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -178,8 +179,8 @@ def get_image_cache(path):
     if (path in image_cache and mtime != image_cache[path][0]) or (path not in image_cache):
         img = make_thumbnail(Image.open(path))

-        old_p = Path(f'cache/{path.name}_cache.png')
-        p = Path(f'cache/cache_{path.name}.png')
+        old_p = Path(f'{cache_folder}/{path.name}_cache.png')
+        p = Path(f'{cache_folder}/cache_{path.name}.png')
         if old_p.exists():
             old_p.rename(p)
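
For context on the hunk above: get_image_cache re-renders a thumbnail only when the path is new to the in-memory image_cache dict or its mtime has changed; the diff only redirects where the result lands on disk. A simplified, hypothetical sketch of that invalidation scheme:

    # Hypothetical, simplified from get_image_cache: re-process a file only
    # when it is unseen or its modification time has changed.
    import os

    image_cache = {}  # path -> (mtime, processed result)

    def get_cached(path):
        mtime = os.stat(path).st_mtime
        if path not in image_cache or image_cache[path][0] != mtime:
            image_cache[path] = (mtime, f"thumbnail of {path}")  # stand-in for make_thumbnail()
        return image_cache[path][1]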

----------------------------------------

@@ -6,6 +6,7 @@ import torch
 import yaml
 from transformers import is_torch_xpu_available

+import extensions
 from modules import shared

 with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:
@@ -204,7 +205,7 @@ def apply_interface_values(state, use_persistent=False):
     return [state[k] if k in state else gr.update() for k in elements]

-def save_settings(state, preset, extensions, show_controls):
+def save_settings(state, preset, extensions_list, show_controls):
     output = copy.deepcopy(shared.settings)
     exclude = ['name2', 'greeting', 'context', 'turn_template']
     for k in state:
@@ -215,10 +216,19 @@ def save_settings(state, preset, extensions, show_controls):
     output['prompt-default'] = state['prompt_menu-default']
     output['prompt-notebook'] = state['prompt_menu-notebook']
     output['character'] = state['character_menu']
-    output['default_extensions'] = extensions
+    output['default_extensions'] = extensions_list
     output['seed'] = int(output['seed'])
     output['show_controls'] = show_controls

+    # Save extension values in the UI
+    for extension_name in extensions_list:
+        extension = getattr(extensions, extension_name).script
+        if hasattr(extension, 'params'):
+            params = getattr(extension, 'params')
+            for param in params:
+                _id = f"{extension_name}-{param}"
+                output[_id] = params[param]
+
     return yaml.dump(output, sort_keys=False, width=float("inf"))
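
The new loop flattens every listed extension's params dict into the saved settings under "{extension_name}-{param}" keys, so UI tweaks to extensions persist across restarts. A small sketch of the resulting YAML shape (the extension name and values here are illustrative, not read from disk):

    # Illustrative: how extension params are flattened into settings keys.
    import yaml

    output = {'show_controls': True}
    extension_params = {'character_bias': {'activate': True, 'custom string': ''}}

    for extension_name, params in extension_params.items():
        for param in params:
            output[f"{extension_name}-{param}"] = params[param]

    print(yaml.dump(output, sort_keys=False, width=float("inf")))
    # show_controls: true
    # character_bias-activate: true
    # character_bias-custom string: ''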

----------------------------------------

@@ -28,7 +28,7 @@ def create_ui(default_preset):
     with gr.Row():
         with gr.Column():
             shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-            shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
+            shared.gradio['temperature'] = gr.Slider(0.01, 5, value=generate_params['temperature'], step=0.01, label='temperature')
             shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
             shared.gradio['min_p'] = gr.Slider(0.0, 1.0, value=generate_params['min_p'], step=0.01, label='min_p')
             shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
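
Context for the wider range: temperature divides the logits before softmax, so values well past the old 1.99 ceiling flatten the distribution markedly (mostly useful alongside tail-pruning samplers such as the min_p slider above). A small worked example with illustrative logits:

    # Effect of temperature T on softmax(logits / T), illustrative values.
    import math

    def softmax(logits, temperature):
        scaled = [l / temperature for l in logits]
        m = max(scaled)
        exps = [math.exp(s - m) for s in scaled]
        total = sum(exps)
        return [e / total for e in exps]

    for t in (0.7, 1.0, 5.0):
        print(t, [round(p, 3) for p in softmax([4.0, 2.0, 0.0], t)])
    # 0.7 [0.943, 0.054, 0.003]  <- sharp
    # 1.0 [0.867, 0.117, 0.016]
    # 5.0 [0.472, 0.316, 0.212]  <- nearly flat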

----------------------------------------

@@ -89,6 +89,7 @@ def torch_version():
         torver = [line for line in torch_version_file if '__version__' in line][0].split('__version__ = ')[1].strip("'")
     else:
         from torch import __version__ as torver
+
     return torver
@@ -185,15 +186,28 @@ def install_webui():
         print("Invalid choice. Please try again.")
         choice = input("Input> ").upper()

-    if choice == "N":
-        print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.")
+    gpu_choice_to_name = {
+        "A": "NVIDIA",
+        "B": "AMD",
+        "C": "APPLE",
+        "D": "INTEL",
+        "N": "NONE"
+    }
+
+    selected_gpu = gpu_choice_to_name[choice]
+
+    if selected_gpu == "NONE":
+        with open(cmd_flags_path, 'r+') as cmd_flags_file:
+            if "--cpu" not in cmd_flags_file.read():
+                print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
+                cmd_flags_file.write("\n--cpu")

     # Find the proper Pytorch installation command
     install_git = "conda install -y -k ninja git"
-    install_pytorch = "python -m pip install torch torchvision torchaudio"
+    install_pytorch = "python -m pip install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* "

     use_cuda118 = "N"
-    if any((is_windows(), is_linux())) and choice == "A":
+    if any((is_windows(), is_linux())) and selected_gpu == "NVIDIA":
         if "USE_CUDA118" in os.environ:
             use_cuda118 = "Y" if os.environ.get("USE_CUDA118", "").lower() in ("yes", "y", "true", "1", "t", "on") else "N"
         else:
@@ -203,29 +217,30 @@ def install_webui():
             while use_cuda118 not in 'YN':
                 print("Invalid choice. Please try again.")
                 use_cuda118 = input("Input> ").upper().strip('"\'').strip()

-        install_pytorch = f"python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/{'cu121' if use_cuda118 == 'N' else 'cu118'}"
+        if use_cuda118 == 'Y':
+            print("CUDA: 11.8")
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
+        else:
+            print("CUDA: 12.1")
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
-    elif not is_macos() and choice == "B":
+    elif not is_macos() and selected_gpu == "AMD":
         if is_linux():
-            install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6"
+            install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
         else:
            print("AMD GPUs are only supported on Linux. Exiting...")
            sys.exit(1)
-    elif is_linux() and (choice == "C" or choice == "N"):
-        install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
-    elif choice == "D":
-        install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 intel_extension_for_pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+    elif is_linux() and selected_gpu in ["APPLE", "NONE"]:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/cpu"
+    elif selected_gpu == "INTEL":
+        install_pytorch += "intel_extension_for_pytorch==2.1.* --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"

     # Install Git and then Pytorch
     print_big_message("Installing PyTorch.")
     run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)

     # Install CUDA libraries (this wasn't necessary for Pytorch before...)
-    if choice == "A":
+    if selected_gpu == "NVIDIA":
         print_big_message("Installing the CUDA runtime libraries.")
         run_cmd(f"conda install -y -c \"nvidia/label/{'cuda-12.1.1' if use_cuda118 == 'N' else 'cuda-11.8.0'}\" cuda-runtime", assert_success=True, environment=True)
@@ -283,25 +298,15 @@ def update_requirements(initial_installation=False):
     is_cpu = '+cpu' in torver  # 2.0.1+cpu

     if is_rocm:
-        if cpu_has_avx2():
-            requirements_file = "requirements_amd.txt"
-        else:
-            requirements_file = "requirements_amd_noavx2.txt"
-    elif is_cpu:
-        if cpu_has_avx2():
-            requirements_file = "requirements_cpu_only.txt"
-        else:
-            requirements_file = "requirements_cpu_only_noavx2.txt"
+        base_requirements = "requirements_amd" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+    elif is_cpu or is_intel:
+        base_requirements = "requirements_cpu_only" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
     elif is_macos():
-        if is_x86_64():
-            requirements_file = "requirements_apple_intel.txt"
-        else:
-            requirements_file = "requirements_apple_silicon.txt"
+        base_requirements = "requirements_apple_" + ("intel" if is_x86_64() else "silicon") + ".txt"
     else:
-        if cpu_has_avx2():
-            requirements_file = "requirements.txt"
-        else:
-            requirements_file = "requirements_noavx2.txt"
+        base_requirements = "requirements" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"

+    requirements_file = base_requirements

     print_big_message(f"Installing webui requirements from file: {requirements_file}")
     print(f"TORCH: {torver}\n")
@@ -346,10 +351,6 @@
     clear_cache()


-def download_model():
-    run_cmd("python download-model.py", environment=True)
-
-
 def launch_webui():
     run_cmd(f"python server.py {flags}", environment=True)
@@ -378,7 +379,7 @@ if __name__ == "__main__":
     if '--model-dir' in flags:
         # Splits on ' ' or '=' while maintaining spaces within quotes
         flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
-        model_dir = [flags_list[(flags_list.index(flag)+1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
+        model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
     else:
         model_dir = 'models'
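
The regex in the hunk above splits the flag string on runs of spaces or on '=' while leaving quoted spans intact, which is how --model-dir values containing spaces survive (the hunk itself only adds spaces around the '+'). A quick demonstration with a hypothetical flags string:

    # Same regex as one_click.py; the flags value is illustrative.
    import re

    flags = '--model-dir "D:\\my models" --listen'
    flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
    model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
    print(flags_list)  # ['--model-dir', '"D:\\my models"', '--listen']
    print(model_dir)   # D:\my models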

----------------------------------------
setup.cfg (new file, +3 lines)

@@ -0,0 +1,3 @@
+[pycodestyle]
+max-line-length = 120
+ignore = E402, E501, E722
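
With this file in place, running "python -m pycodestyle ." in the repo root picks up the 120-character limit and the suppressions automatically, since pycodestyle reads the [pycodestyle] section of setup.cfg. The ignored codes are E402 (module-level import not at the top of the file), E501 (line too long), and E722 (bare except).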