Add --verbose option (oops)

oobabooga 2023-01-26 02:12:53 -03:00
parent abc920752f
commit 61611197e0
2 changed files with 5 additions and 0 deletions


@@ -135,6 +135,7 @@ Optionally, you can use the following command-line flags:
 | `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example.|
 | `--listen` | Make the web UI reachable from your local network.|
 | `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| `--verbose` | Print the prompts to the terminal. |
 Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).


@@ -34,6 +34,7 @@ parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
 parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
 parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
+parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()
 if (args.chat or args.cai_chat) and not args.no_stream:
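
For context, `action='store_true'` is the standard argparse idiom for a boolean flag: the option takes no value, defaults to False, and becomes True when the flag appears on the command line. A minimal, self-contained sketch of the pattern (the script name and printed message are illustrative, not from the repository):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
args = parser.parse_args()

# args.verbose is False by default and True only when --verbose was passed.
print(f'verbose mode: {args.verbose}')

Saved as, say, demo.py, running `python demo.py --verbose` prints `verbose mode: True`; running it with no arguments prints `verbose mode: False`.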
@@ -164,6 +165,9 @@ def formatted_outputs(reply, model_name):
 def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None, stopping_string=None):
     global model, tokenizer, model_name, loaded_preset, preset
+    if args.verbose:
+        print(f"\n\n{question}\n--------------------\n")
+
     if selected_model != model_name:
         model_name = selected_model
         model = tokenizer = None
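
Net effect: with the flag set, generate_reply() echoes each incoming prompt to the terminal before any generation happens. A runnable sketch of the same gating, with a hypothetical stub standing in for the real model call:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
args = parser.parse_args()

def generate_reply(question):
    # Mirrors the commit: dump the full prompt plus a separator line before generating.
    if args.verbose:
        print(f"\n\n{question}\n--------------------\n")
    return "stub reply"  # hypothetical stand-in for the real model inference

generate_reply("Write a haiku about rain.")

Launched with `--verbose`, the script prints the prompt and separator; without it, the call runs silently.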