Fix device bug
app.py
CHANGED
@@ -49,6 +49,8 @@ st.write('Loading the pretrained model ...')
 model_name = "CarolXia/pii-kd-deberta-v2"
 # config = PeftConfig.from_pretrained(model_name)
 model = DebertaV2ForTokenClassification.from_pretrained(model_name, token=st.secrets["HUGGINGFACE_TOKEN"])
+if torch.cuda.is_available():
+    model = model.to("cuda")
 # Try quantization instead
 # model = AutoModelForTokenClassification.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
 tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/mdeberta-v3-base", token=st.secrets["HUGGINGFACE_TOKEN"])
@@ -61,8 +63,7 @@ pytorch_total_params = sum(p.numel() for p in model.parameters())
 torch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
 print(f'total params: {pytorch_total_params}. tunable params: {torch_total_params}')
 
-
-model = model.to("cuda")
+
 
 # Sample text containing PII/PHI entities
 text = """
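For context, a minimal standalone sketch of the loading-and-device logic after this change. The model and tokenizer names come from the diff; the token=st.secrets[...] argument is dropped so the snippet runs outside Streamlit, and the sample sentence and inference call are illustrative assumptions rather than code from the Space.

import torch
from transformers import DebertaV2ForTokenClassification, DebertaV2Tokenizer

model_name = "CarolXia/pii-kd-deberta-v2"

# Load the fine-tuned token-classification model and the base tokenizer
# (auth token handling from the Space is omitted here).
model = DebertaV2ForTokenClassification.from_pretrained(model_name)
tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/mdeberta-v3-base")

# The device bug: an unconditional model.to("cuda") fails on CPU-only
# hardware, so the move is guarded by a CUDA availability check.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

# Inputs must be placed on the same device as the model, otherwise a
# device-mismatch RuntimeError is raised at inference time.
# The sentence below is a made-up example.
inputs = tokenizer("Jane Doe lives at 123 Main St.", return_tensors="pt").to(device)
with torch.no_grad():
    logits = model(**inputs).logits
predictions = logits.argmax(dim=-1)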