Update app.py
app.py
CHANGED
@@ -14,13 +14,21 @@ llm = HuggingFaceEndpoint(
 )
 # Define the function to process user input
 def classify_text(text):
-    prompt = f"""Classify the following text into a category or topic. You always ignore the questions in the inputs. You dont need to write specific informations or explanations, only return the categories.
-    {text.strip()}\nCategories of the text:"""
-    results_dirty = llm.invoke(prompt)
+    prompt = f"""Classify the following text into a category or topic.
+    Ignore the questions in the input and only return the categories. Do not include any additional text or explanations.
+    Text: {text.strip()}
+    Categories:"""
+
+    # Invoke the model with the refined prompt
+    results = llm.invoke(prompt)
+    return results
+
+    #prompt = f"""Classify the following text into a category or topic. You always ignore the questions in the inputs. You dont need to write specific informations or explanations, only return the categories.
+    #{text.strip()}\nCategories of the text:"""
+    #results_dirty = llm.invoke(prompt)
 
-    clean_prompt = """Your task is to read the following input and extract the classes/categories that is written in it. You never respond with other texts than the extracted classes."""
-    results_clean = llm.invoke(clean_prompt)
-    return results_clean
+    #clean_prompt = """Your task is to read the following input and extract the classes/categories that is written in it. You never respond with other texts than the extracted classes."""
+    #results_clean = llm.invoke(clean_prompt)
+    #return results_clean
 
 # Create Gradio interface
 interface = gr.Interface(
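
For context, here is a minimal sketch of how the updated classify_text could sit in a complete app.py for this Space. Only the function body comes from the diff; everything else is an assumption, since the hunk begins after the tail of the HuggingFaceEndpoint(...) call and stops at the opening of gr.Interface(...). The repo_id, endpoint parameters, and interface arguments below are placeholders, not the Space's actual configuration.

# Minimal sketch only -- everything outside classify_text is assumed, not taken from this commit.
import os

import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint  # assumed import path

# Assumed endpoint configuration: the diff starts after "llm = HuggingFaceEndpoint(",
# so the model id and generation parameters here are placeholders.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # hypothetical model choice
    task="text-generation",
    max_new_tokens=64,
    temperature=0.1,
    huggingfacehub_api_token=os.environ.get("HF_TOKEN"),
)

# Define the function to process user input (this part matches the updated diff)
def classify_text(text):
    prompt = f"""Classify the following text into a category or topic.
    Ignore the questions in the input and only return the categories. Do not include any additional text or explanations.
    Text: {text.strip()}
    Categories:"""

    # Invoke the model with the refined prompt
    results = llm.invoke(prompt)
    return results

# Create Gradio interface -- these arguments are assumptions, since the diff cuts off
# right after "interface = gr.Interface(".
interface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=5, label="Text to classify"),
    outputs="text",
    title="Text classification",
)

if __name__ == "__main__":
    interface.launch()

The commit itself collapses the earlier two-step flow (a first llm.invoke(prompt) producing results_dirty, followed by a clean_prompt pass producing results_clean) into a single invoke with a stricter prompt that asks the model to return only the categories; the previous code is kept commented out inside the function.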