diff --git a/css/html_4chan_style.css b/css/html_4chan_style.css
deleted file mode 100644
index afbfb537..00000000
--- a/css/html_4chan_style.css
+++ /dev/null
@@ -1,73 +0,0 @@
-#parent #container {
-    background-color: #eef2ff;
-    padding: 17px;
-}
-
-#parent #container .reply {
-    background-color: rgb(214 218 240);
-    border-bottom: 1px solid rgb(183 197 217);
-    border-image: none 100% 1 0 stretch;
-    border-left: 0 none rgb(0 0 0);
-    border-right: 1px solid rgb(183 197 217);
-    color: rgb(0 0 0);
-    display: table;
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-    margin: 4px 0;
-    overflow: hidden hidden;
-    padding: 4px 2px;
-}
-
-#parent #container .number {
-    color: rgb(0 0 0);
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-    width: 342.65px;
-    margin-right: 7px;
-}
-
-#parent #container .op {
-    color: rgb(0 0 0);
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-    margin: 4px 0 8px;
-    overflow: hidden hidden;
-}
-
-#parent #container .op blockquote {
-    margin-left: 0 !important;
-}
-
-#parent #container .name {
-    color: rgb(17 119 67);
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-    font-weight: 700;
-    margin-left: 7px;
-}
-
-#parent #container .quote {
-    color: rgb(221 0 0);
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-    text-decoration: underline solid rgb(221 0 0);
-    text-decoration-thickness: auto;
-}
-
-#parent #container .greentext {
-    color: rgb(120 153 34);
-    font-family: arial, helvetica, sans-serif;
-    font-size: 13.3333px;
-}
-
-#parent #container blockquote {
-    margin: 0 !important;
-    margin-block: 1em 1em;
-    margin-inline: 40px 40px;
-    margin: 13.33px 40px !important;
-}
-
-#parent #container .message_4chan {
-    color: black;
-    border: none;
-}
\ No newline at end of file
diff --git a/docs/08 - Additional Tips.md b/docs/08 - Additional Tips.md
index 7ad00ee3..89675cca 100644
--- a/docs/08 - Additional Tips.md
+++ b/docs/08 - Additional Tips.md
@@ -13,29 +13,6 @@ Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126
 
 This file will be automatically detected the next time you start the web UI.
 
-## GPT-4chan
-
-[GPT-4chan](https://huggingface.co/ykilcher/gpt-4chan) has been shut down from Hugging Face, so you need to download it elsewhere. You have two options:
-
-* Torrent: [16-bit](https://archive.org/details/gpt4chan_model_float16) / [32-bit](https://archive.org/details/gpt4chan_model)
-* Direct download: [16-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model_float16/) / [32-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model/)
-
-The 32-bit version is only relevant if you intend to run the model in CPU mode. Otherwise, you should use the 16-bit version.
-
-After downloading the model, follow these steps:
-
-1. Place the files under `models/gpt4chan_model_float16` or `models/gpt4chan_model`.
-2. Place GPT-J 6B's config.json file in that same folder: [config.json](https://huggingface.co/EleutherAI/gpt-j-6B/raw/main/config.json).
-3. Download GPT-J 6B's tokenizer files (they will be automatically detected when you attempt to load GPT-4chan):
-
-```
-python download-model.py EleutherAI/gpt-j-6B --text-only
-```
-
-When you load this model in default or notebook modes, the "HTML" tab will show the generated text in 4chan format:
-
-![Image3](https://github.com/oobabooga/screenshots/raw/main/gpt4chan.png)
-
 ## Using LoRAs with GPTQ-for-LLaMa
 
 This requires using a monkey patch that is supported by this web UI: https://github.com/johnsmith0031/alpaca_lora_4bit
diff --git a/modules/html_generator.py b/modules/html_generator.py
index 278f1632..2be53fc8 100644
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ -16,8 +16,6 @@ image_cache = {}
 with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r') as f:
     readable_css = f.read()
 
-with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f:
-    _4chan_css = css_f.read()
 
 with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
     instruct_css = f.read()
@@ -118,63 +116,6 @@ def generate_basic_html(string):
     return string
 
 
-def process_post(post, c):
-    t = post.split('\n')
-    number = t[0].split(' ')[1]
-    if len(t) > 1:
-        src = '\n'.join(t[1:])
-    else:
-        src = ''
-    src = re.sub('>', '&gt;', src)
-    src = re.sub('(&gt;&gt;[0-9]*)', '<span class="quote">\\1</span>', src)
-    src = re.sub('\n', '<br>\n', src)
-    src = f'<blockquote class="message_4chan">{src}\n</blockquote>'
-    src = f'<span class="name">Anonymous</span> <span class="number">No.{number}</span>\n{src}'
-    return src
-
-
-def generate_4chan_html(f):
-    posts = []
-    post = ''
-    c = -2
-    for line in f.splitlines():
-        line += "\n"
-        if line == '-----\n':
-            continue
-        elif line.startswith('--- '):
-            c += 1
-            if post != '':
-                src = process_post(post, c)
-                posts.append(src)
-            post = line
-        else:
-            post += line
-
-    if post != '':
-        src = process_post(post, c)
-        posts.append(src)
-
-    for i in range(len(posts)):
-        if i == 0:
-            posts[i] = f'<div class="op">{posts[i]}</div>\n'
-        else:
-            posts[i] = f'<div class="reply">{posts[i]}</div>\n'
-
-    output = ''
-    output += f'<style>{_4chan_css}</style><div id="parent"><div id="container">'
-    for post in posts:
-        output += post
-
-    output += '</div></div>'
-    output = output.split('\n')
-    for i in range(len(output)):
-        output[i] = re.sub(r'^(&gt;(.*?)(<br>|</div>))', r'<span class="greentext">\1</span>', output[i])
-        output[i] = re.sub(r'^<blockquote class="message_4chan">(&gt;(.*?)(<br>|</div>))', r'<blockquote class="message_4chan"><span class="greentext">\1</span>', output[i])
-
-    output = '\n'.join(output)
-    return output
-
-
 def make_thumbnail(image):
     image = image.resize((350, round(image.size[1] / image.size[0] * 350)), Image.Resampling.LANCZOS)
     if image.size[1] > 470:
diff --git a/modules/models.py b/modules/models.py
index 60568063..541c6301 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -110,9 +110,7 @@ def load_model(model_name, loader=None):
 
 def load_tokenizer(model_name, model):
     tokenizer = None
     path_to_model = Path(f"{shared.args.model_dir}/{model_name}/")
-    if any(s in model_name.lower() for s in ['gpt-4chan', 'gpt4chan']) and Path(f"{shared.args.model_dir}/gpt-j-6B/").exists():
-        tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/gpt-j-6B/"))
-    elif path_to_model.exists():
+    if path_to_model.exists():
         if shared.args.no_use_fast:
             logger.info('Loading the tokenizer with use_fast=False.')
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 43488852..724bb0f0 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -22,7 +22,7 @@ from modules.callbacks import (
 from modules.extensions import apply_extensions
 from modules.grammar.grammar_utils import initialize_grammar
 from modules.grammar.logits_process import GrammarConstrainedLogitsProcessor
-from modules.html_generator import generate_4chan_html, generate_basic_html
+from modules.html_generator import generate_basic_html
 from modules.logging_colors import logger
 from modules.models import clear_torch_cache, local_rank
 
@@ -186,23 +186,7 @@ def generate_reply_wrapper(question, state, stopping_strings=None):
 
 
 def formatted_outputs(reply, model_name):
-    if any(s in model_name for s in ['gpt-4chan', 'gpt4chan']):
-        reply = fix_gpt4chan(reply)
-        return html.unescape(reply), generate_4chan_html(reply)
-    else:
-        return html.unescape(reply), generate_basic_html(reply)
-
-
-def fix_gpt4chan(s):
-    """
-    Removes empty replies from gpt4chan outputs
-    """
-    for i in range(10):
-        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
-        s = re.sub("--- [0-9]*\n *\n---", "---", s)
-        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
-
-    return s
+    return html.unescape(reply), generate_basic_html(reply)
 
 
 def fix_galactica(s):
diff --git a/prompts/GPT-4chan.txt b/prompts/GPT-4chan.txt
deleted file mode 100644
index 1bc8c7f4..00000000
--- a/prompts/GPT-4chan.txt
+++ /dev/null
@@ -1,6 +0,0 @@
------
---- 865467536
-Hello, AI frens!
-How are you doing on this fine day?
---- 865467537
-