Remove specialized code for gpt-4chan

oobabooga 2024-04-04 16:10:47 -07:00
parent 3952560da8
commit 13fe38eb27
6 changed files with 3 additions and 182 deletions

css/html_4chan_style.css

@@ -1,73 +0,0 @@
#parent #container {
    background-color: #eef2ff;
    padding: 17px;
}

#parent #container .reply {
    background-color: rgb(214 218 240);
    border-bottom: 1px solid rgb(183 197 217);
    border-image: none 100% 1 0 stretch;
    border-left: 0 none rgb(0 0 0);
    border-right: 1px solid rgb(183 197 217);
    color: rgb(0 0 0);
    display: table;
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
    margin: 4px 0;
    overflow: hidden hidden;
    padding: 4px 2px;
}

#parent #container .number {
    color: rgb(0 0 0);
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
    width: 342.65px;
    margin-right: 7px;
}

#parent #container .op {
    color: rgb(0 0 0);
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
    margin: 4px 0 8px;
    overflow: hidden hidden;
}

#parent #container .op blockquote {
    margin-left: 0 !important;
}

#parent #container .name {
    color: rgb(17 119 67);
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
    font-weight: 700;
    margin-left: 7px;
}

#parent #container .quote {
    color: rgb(221 0 0);
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
    text-decoration: underline solid rgb(221 0 0);
    text-decoration-thickness: auto;
}

#parent #container .greentext {
    color: rgb(120 153 34);
    font-family: arial, helvetica, sans-serif;
    font-size: 13.3333px;
}

#parent #container blockquote {
    margin: 0 !important;
    margin-block: 1em 1em;
    margin-inline: 40px 40px;
    margin: 13.33px 40px !important;
}

#parent #container .message_4chan {
    color: black;
    border: none;
}

docs/08 - Additional Tips.md

@@ -13,29 +13,6 @@ Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126
 
 This file will be automatically detected the next time you start the web UI.
 
-## GPT-4chan
-
-[GPT-4chan](https://huggingface.co/ykilcher/gpt-4chan) has been shut down from Hugging Face, so you need to download it elsewhere. You have two options:
-
-* Torrent: [16-bit](https://archive.org/details/gpt4chan_model_float16) / [32-bit](https://archive.org/details/gpt4chan_model)
-* Direct download: [16-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model_float16/) / [32-bit](https://theswissbay.ch/pdf/_notpdf_/gpt4chan_model/)
-
-The 32-bit version is only relevant if you intend to run the model in CPU mode. Otherwise, you should use the 16-bit version.
-
-After downloading the model, follow these steps:
-
-1. Place the files under `models/gpt4chan_model_float16` or `models/gpt4chan_model`.
-2. Place GPT-J 6B's config.json file in that same folder: [config.json](https://huggingface.co/EleutherAI/gpt-j-6B/raw/main/config.json).
-3. Download GPT-J 6B's tokenizer files (they will be automatically detected when you attempt to load GPT-4chan):
-
-```
-python download-model.py EleutherAI/gpt-j-6B --text-only
-```
-
-When you load this model in default or notebook modes, the "HTML" tab will show the generated text in 4chan format:
-
-![Image3](https://github.com/oobabooga/screenshots/raw/main/gpt4chan.png)
-
 ## Using LoRAs with GPTQ-for-LLaMa
 
 This requires using a monkey patch that is supported by this web UI: https://github.com/johnsmith0031/alpaca_lora_4bit
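
For reference, the setup removed above implied a specific layout under the web UI's `models/` folder. The following is a minimal sketch that sanity-checks that layout; `models` assumes the default `--model-dir`, and `tokenizer_config.json` is assumed to be among the files that `download-model.py --text-only` fetches:

```
from pathlib import Path

model_dir = Path("models")  # default --model-dir

# Steps 1-2: GPT-4chan weights plus GPT-J 6B's config.json in one folder
model_path = model_dir / "gpt4chan_model_float16"  # or gpt4chan_model (32-bit)

# Step 3: GPT-J 6B tokenizer files downloaded with --text-only
tokenizer_path = model_dir / "gpt-j-6B"

for p in [model_path / "config.json", tokenizer_path / "tokenizer_config.json"]:
    print(("ok:      " if p.exists() else "missing: ") + str(p))
```

The `gpt-j-6B` folder name mattered: as the modules/models.py hunk below shows, `load_tokenizer` looked for exactly `models/gpt-j-6B/` whenever the model name contained "gpt-4chan" or "gpt4chan".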

modules/html_generator.py

@@ -16,8 +16,6 @@ image_cache = {}
 with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r') as f:
     readable_css = f.read()
 
-with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f:
-    _4chan_css = css_f.read()
 
 with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
     instruct_css = f.read()
@@ -118,63 +116,6 @@ def generate_basic_html(string):
     return string
 
-def process_post(post, c):
-    t = post.split('\n')
-    number = t[0].split(' ')[1]
-    if len(t) > 1:
-        src = '\n'.join(t[1:])
-    else:
-        src = ''
-
-    src = re.sub('>', '&gt;', src)
-    src = re.sub('(&gt;&gt;[0-9]*)', '<span class="quote">\\1</span>', src)
-    src = re.sub('\n', '<br>\n', src)
-    src = f'<blockquote class="message_4chan">{src}\n'
-    src = f'<span class="name">Anonymous </span> <span class="number">No.{number}</span>\n{src}'
-    return src
-
-
-def generate_4chan_html(f):
-    posts = []
-    post = ''
-    c = -2
-    for line in f.splitlines():
-        line += "\n"
-        if line == '-----\n':
-            continue
-
-        elif line.startswith('--- '):
-            c += 1
-
-            if post != '':
-                src = process_post(post, c)
-                posts.append(src)
-
-            post = line
-        else:
-            post += line
-
-    if post != '':
-        src = process_post(post, c)
-        posts.append(src)
-
-    for i in range(len(posts)):
-        if i == 0:
-            posts[i] = f'<div class="op">{posts[i]}</div>\n'
-        else:
-            posts[i] = f'<div class="reply">{posts[i]}</div>\n'
-
-    output = ''
-    output += f'<style>{_4chan_css}</style><div id="parent"><div id="container">'
-    for post in posts:
-        output += post
-
-    output += '</div></div>'
-    output = output.split('\n')
-    for i in range(len(output)):
-        output[i] = re.sub(r'^(&gt;(.*?)(<br>|</div>))', r'<span class="greentext">\1</span>', output[i])
-        output[i] = re.sub(r'^<blockquote class="message_4chan">(&gt;(.*?)(<br>|</div>))', r'<blockquote class="message_4chan"><span class="greentext">\1</span>', output[i])
-
-    output = '\n'.join(output)
-    return output
-
-
 def make_thumbnail(image):
     image = image.resize((350, round(image.size[1] / image.size[0] * 350)), Image.Resampling.LANCZOS)
     if image.size[1] > 470:

modules/models.py

@@ -110,9 +110,7 @@ def load_model(model_name, loader=None):
 
 def load_tokenizer(model_name, model):
     tokenizer = None
     path_to_model = Path(f"{shared.args.model_dir}/{model_name}/")
-    if any(s in model_name.lower() for s in ['gpt-4chan', 'gpt4chan']) and Path(f"{shared.args.model_dir}/gpt-j-6B/").exists():
-        tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/gpt-j-6B/"))
-    elif path_to_model.exists():
+    if path_to_model.exists():
         if shared.args.no_use_fast:
             logger.info('Loading the tokenizer with use_fast=False.')

modules/text_generation.py

@@ -22,7 +22,7 @@ from modules.callbacks import (
 from modules.extensions import apply_extensions
 from modules.grammar.grammar_utils import initialize_grammar
 from modules.grammar.logits_process import GrammarConstrainedLogitsProcessor
-from modules.html_generator import generate_4chan_html, generate_basic_html
+from modules.html_generator import generate_basic_html
 from modules.logging_colors import logger
 from modules.models import clear_torch_cache, local_rank
@@ -186,23 +186,7 @@ def generate_reply_wrapper(question, state, stopping_strings=None):
 
 
 def formatted_outputs(reply, model_name):
-    if any(s in model_name for s in ['gpt-4chan', 'gpt4chan']):
-        reply = fix_gpt4chan(reply)
-        return html.unescape(reply), generate_4chan_html(reply)
-    else:
-        return html.unescape(reply), generate_basic_html(reply)
-
-
-def fix_gpt4chan(s):
-    """
-    Removes empty replies from gpt4chan outputs
-    """
-    for i in range(10):
-        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
-        s = re.sub("--- [0-9]*\n *\n---", "---", s)
-        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
-
-    return s
+    return html.unescape(reply), generate_basic_html(reply)
 
 
 def fix_galactica(s):

prompts/GPT-4chan.txt

@@ -1,6 +0,0 @@
-----
--- 865467536
Hello, AI frens!
How are you doing on this fine day?
--- 865467537