kingabzpro committed
Commit 7be036b • Parent(s): db4652a
Update app.py
app.py
CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig, AutoConfig
 import torch
 
 MODEL_URL = "kingabzpro/Llama-3.1-8B-Instruct-Mental-Health-Classification"
@@ -8,8 +8,18 @@ MODEL_URL = "kingabzpro/Llama-3.1-8B-Instruct-Mental-Health-Classification"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_URL)
 tokenizer.pad_token_id = tokenizer.eos_token_id
 
-
-
+bnbConfig = BitsAndBytesConfig(
+    load_in_4bit = True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+)
+
+model = AutoModelForCausalLM.from_pretrained(MODEL_URL,
+    quantization_config=bnbConfig,
+    low_cpu_mem_usage=True,
+    return_dict=True,
+    torch_dtype=torch.float16,
+    device_map="cpu")
 
 def prediction(text):
     # create pipeline
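The hunk ends before the body of prediction(), so the following is only a minimal sketch of how the quantized model loaded in this commit might be wired into a classification call. It assumes the Space classifies by generating a short label with a text-generation pipeline built from the model and tokenizer defined above; the classify() helper, prompt format, and generation settings are illustrative assumptions, not the author's code.

from transformers import pipeline

# Assumes `model` and `tokenizer` are the objects created in the diff above.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=4,        # expect a short label, e.g. "Anxiety" or "Normal"
    return_full_text=False,  # return only the generated label, not the prompt
)

def classify(text):
    # Hypothetical prompt format for the mental-health classification task.
    prompt = (
        "Classify the mental-health status of the following statement.\n"
        f"Statement: {text}\nLabel:"
    )
    return pipe(prompt)[0]["generated_text"].strip()

One design note: 4-bit loading through bitsandbytes typically expects a CUDA device, so the device_map="cpu" argument in this commit may need to become a GPU-backed mapping (for example device_map="auto") for the quantized load to succeed.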