Spaces:
Sleeping
Sleeping
Commit
·
e65e5ab
1
Parent(s):
5a99665
up model
Browse files- app.py +2 -1
- demo_watermark.py +1 -3
app.py
CHANGED
@@ -23,7 +23,8 @@ arg_dict = {
|
|
23 |
# 'model_name_or_path': 'facebook/opt-125m',
|
24 |
# 'model_name_or_path': 'facebook/opt-1.3b',
|
25 |
# 'model_name_or_path': 'facebook/opt-2.7b',
|
26 |
-
'model_name_or_path': 'facebook/opt-6.7b',
|
|
|
27 |
'load_fp16' : True,
|
28 |
# 'load_fp16' : False,
|
29 |
'prompt_max_length': None,
|
|
|
23 |
# 'model_name_or_path': 'facebook/opt-125m',
|
24 |
# 'model_name_or_path': 'facebook/opt-1.3b',
|
25 |
# 'model_name_or_path': 'facebook/opt-2.7b',
|
26 |
+
# 'model_name_or_path': 'facebook/opt-6.7b',
|
27 |
+
'model_name_or_path': 'facebook/opt-13b',
|
28 |
'load_fp16' : True,
|
29 |
# 'load_fp16' : False,
|
30 |
'prompt_max_length': None,
|
demo_watermark.py
CHANGED
@@ -328,10 +328,7 @@ def run_gradio(args, model=None, device=None, tokenizer=None):
|
|
328 |
gr.Markdown("## 💧 [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) 🔍")
|
329 |
with gr.Row():
|
330 |
gr.Markdown("")
|
331 |
-
with gr.Row():
|
332 |
gr.Markdown("[jwkirchenbauer/lm-watermarking](https://github.com/jwkirchenbauer/lm-watermarking)")
|
333 |
-
with gr.Row():
|
334 |
-
gr.Markdown(f"Language model: {args.model_name_or_path}")
|
335 |
with gr.Accordion("Understanding the output metrics",open=False):
|
336 |
gr.Markdown(
|
337 |
"""
|
@@ -365,6 +362,7 @@ def run_gradio(args, model=None, device=None, tokenizer=None):
|
|
365 |
Longer prompts and stopping mid sentence often helps encourage more fluent, longer genrations.
|
366 |
"""
|
367 |
)
|
|
|
368 |
|
369 |
# Construct state for parameters, define updates and toggles
|
370 |
default_prompt = args.__dict__.pop("default_prompt")
|
|
|
328 |
gr.Markdown("## 💧 [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) 🔍")
|
329 |
with gr.Row():
|
330 |
gr.Markdown("")
|
|
|
331 |
gr.Markdown("[jwkirchenbauer/lm-watermarking](https://github.com/jwkirchenbauer/lm-watermarking)")
|
|
|
|
|
332 |
with gr.Accordion("Understanding the output metrics",open=False):
|
333 |
gr.Markdown(
|
334 |
"""
|
|
|
362 |
Longer prompts and stopping mid sentence often helps encourage more fluent, longer genrations.
|
363 |
"""
|
364 |
)
|
365 |
+
gr.Markdown(f"Language model: {args.model_name_or_path}")
|
366 |
|
367 |
# Construct state for parameters, define updates and toggles
|
368 |
default_prompt = args.__dict__.pop("default_prompt")
|