Spaces:
Running
Running
Add a prompt guard for user prompts and a safety check on LLM-generated content after user feedback; add API-key authentication for the prompt-guard model download
Browse files
helper.py
CHANGED
@@ -111,9 +111,12 @@ PROMPT_GUARD_CONFIG = {
|
|
111 |
def initialize_prompt_guard():
|
112 |
"""Initialize Prompt Guard model"""
|
113 |
try:
|
114 |
-
|
|
|
|
|
|
|
115 |
model = AutoModelForSequenceClassification.from_pretrained(
|
116 |
-
PROMPT_GUARD_CONFIG["model_id"]
|
117 |
)
|
118 |
return model, tokenizer
|
119 |
except Exception as e:
|
|
|
111 |
def initialize_prompt_guard():
|
112 |
"""Initialize Prompt Guard model"""
|
113 |
try:
|
114 |
+
api_key = get_huggingface_api_key()
|
115 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
116 |
+
PROMPT_GUARD_CONFIG["model_id"], api_key=api_key
|
117 |
+
)
|
118 |
model = AutoModelForSequenceClassification.from_pretrained(
|
119 |
+
PROMPT_GUARD_CONFIG["model_id"], api_key=api_key
|
120 |
)
|
121 |
return model, tokenizer
|
122 |
except Exception as e:
|