Minor improvements

This commit is contained in:
oobabooga 2023-01-15 16:43:31 -03:00
parent c6083f3dca
commit e04ecd4bce
2 changed files with 7 additions and 7 deletions

View file

@@ -7,6 +7,11 @@ This is a library for formatting gpt4chan outputs as nice HTML.
import re import re
from pathlib import Path from pathlib import Path
def generate_basic_html(s):
    """Render plain text as simple HTML: each line becomes a styled <p>,
    and the whole body is wrapped in a centered, light-grey <div>."""
    paragraphs = []
    for line in s.split('\n'):
        paragraphs.append(f'<p style="margin-bottom: 20px">{line}</p>')
    body = '\n'.join(paragraphs)
    return f'<div style="max-width: 600px; margin-left: auto; margin-right: auto; background-color:#efefef; color:#0b0f19; padding:3em; font-size:1.1em; font-family: helvetica">{body}</div>'
def process_post(post, c): def process_post(post, c):
t = post.split('\n') t = post.split('\n')
number = t[0].split(' ')[1] number = t[0].split(' ')[1]

View file

@@ -119,11 +119,6 @@ def fix_galactica(s):
s = s.replace(r'$$', r'$') s = s.replace(r'$$', r'$')
return s return s
def generate_html(s):
    """Render plain text as simple HTML: every line is wrapped in a styled
    <p>, then the joined result is enclosed in a centered, blue-tinted <div>."""
    wrapped = [f'<p style="margin-bottom: 20px">{line}</p>' for line in s.split('\n')]
    inner = '\n'.join(wrapped)
    result = f'<div style="max-width: 600px; margin-left: auto; margin-right: auto; background-color:#eef2ff; color:#0b0f19; padding:3em; font-size:1.2em;">{inner}</div>'
    return result
def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None): def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
global model, tokenizer, model_name, loaded_preset, preset global model, tokenizer, model_name, loaded_preset, preset
@@ -157,12 +152,12 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
reply = reply.replace(r'<|endoftext|>', '') reply = reply.replace(r'<|endoftext|>', '')
if model_name.lower().startswith('galactica'): if model_name.lower().startswith('galactica'):
reply = fix_galactica(reply) reply = fix_galactica(reply)
return reply, reply, generate_html(reply) return reply, reply, generate_basic_html(reply)
elif model_name.lower().startswith('gpt4chan'): elif model_name.lower().startswith('gpt4chan'):
reply = fix_gpt4chan(reply) reply = fix_gpt4chan(reply)
return reply, 'Only applicable for galactica models.', generate_4chan_html(reply) return reply, 'Only applicable for galactica models.', generate_4chan_html(reply)
else: else:
return reply, 'Only applicable for galactica models.', generate_html(reply) return reply, 'Only applicable for galactica models.', generate_basic_html(reply)
# Choosing the default model # Choosing the default model
if args.model is not None: if args.model is not None: