Update app.py
app.py CHANGED
@@ -10,8 +10,7 @@ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom')
 
 BASE_MODEL = "bigscience/bloom-3b"
 
-
-LORA_WEIGHTS = f"jslin09/LegalChatbot-bloom-3b"
+LORA_WEIGHTS = "jslin09/LegalChatbot-bloom-3b"
 
 config = PeftConfig.from_pretrained(LORA_WEIGHTS)
 
@@ -112,7 +111,7 @@ def evaluate(
     max_new_tokens=128,
     **kwargs,
 ):
-    prompt =
+    prompt = generate_prompt(instruction, input)  # for the Chinese version, use generate_prompt_tw instead
     inputs = tokenizer(prompt, return_tensors="pt")
     input_ids = inputs["input_ids"].to(device)
     generation_config = GenerationConfig(
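For context, the two hunks above are fragments of the usual transformers + peft loading and inference pattern. The following is a minimal sketch of how app.py plausibly wires these pieces together; the prompt template in generate_prompt() and the exact generate()/decode calls are illustrative assumptions, not taken verbatim from the Space.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
from peft import PeftConfig, PeftModel

BASE_MODEL = "bigscience/bloom-3b"
LORA_WEIGHTS = "jslin09/LegalChatbot-bloom-3b"  # plain string; the f-string prefix removed in this commit was unnecessary

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom')
config = PeftConfig.from_pretrained(LORA_WEIGHTS)           # adapter config pulled from the Hub
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL)    # BLOOM-3B base weights
model = PeftModel.from_pretrained(model, LORA_WEIGHTS)      # attach the LegalChatbot LoRA adapter
model.to(device)
model.eval()

def generate_prompt(instruction, input=None):
    # Hypothetical English template; the Space also provides generate_prompt_tw for Chinese prompts.
    if input:
        return f"Instruction: {instruction}\nInput: {input}\nResponse:"
    return f"Instruction: {instruction}\nResponse:"

def evaluate(
    instruction,
    input=None,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)  # swap in generate_prompt_tw for the Chinese version
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        max_new_tokens=max_new_tokens,
        **kwargs,
    )
    with torch.no_grad():
        output = model.generate(input_ids=input_ids, generation_config=generation_config)
    return tokenizer.decode(output[0], skip_special_tokens=True)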