# gpt_detector/app.py — HuggingFace Space (commit 5f2acbf, "debug2", by rusen; 1.53 kB)
import gradio as gr
from transformers import pipeline
import numpy as np
# Load three fine-tuned AI-text detectors from local checkpoint directories.
# Each pipeline returns [{"label": str, "score": float}, ...] for an input string.
# NOTE(review): label vocabularies differ per model (e.g. "Fake"/"Real" vs
# "ChatGPT"/"Human") — confirm against the fine-tuned checkpoints' configs.
roberta_base_detector = pipeline("text-classification", model="Models/fine_tuned/roberta-base-openai-detector-model", tokenizer="Models/fine_tuned/roberta-base-openai-detector-tokenizer")
chatgpt_lli_hc3_detector = pipeline("text-classification", model="Models/fine_tuned/chatgpt-detector-lli-hc3-model", tokenizer="Models/fine_tuned/chatgpt-detector-lli-hc3-tokenizer")
chatgpt_roberta_detector = pipeline("text-classification", model="Models/fine_tuned/chatgpt-detector-roberta-model", tokenizer="Models/fine_tuned/chatgpt-detector-roberta-tokenizer")
def classify_text(text):
    """Classify *text* as AI-generated or human-written by majority vote.

    Runs the input through three fine-tuned detector pipelines, converts
    each model's label to a binary AI/Human vote, and returns the label
    that wins the majority (3 voters, so a tie is impossible).

    Parameters
    ----------
    text : str
        The sentence to classify.

    Returns
    -------
    str
        "AI" if at least two detectors flag the text as machine-generated,
        otherwise "Human".
    """
    # Labels that mean "machine-generated" across the three detectors:
    # roberta-base-openai-detector emits "Fake"/"Real"; the HC3 and
    # chatgpt-roberta detectors emit "ChatGPT"/"Human".
    # NOTE(review): assumed from the upstream model cards — confirm against
    # the fine-tuned checkpoints' label configs.
    ai_labels = {"Fake", "ChatGPT", "AI"}

    # Collect each detector's top label for the input text.
    labels = [
        roberta_base_detector(text)[0]["label"],
        chatgpt_lli_hc3_detector(text)[0]["label"],
        chatgpt_roberta_detector(text)[0]["label"],
    ]

    # Tally binary votes. (Original code compared raw label strings to the
    # integer 1 and returned the same value on both branches; fixed here.)
    votes = {"AI": 0, "Human": 0}
    for label in labels:
        if label in ai_labels:
            votes["AI"] += 1
        else:
            votes["Human"] += 1

    # Majority decision.
    return "AI" if votes["AI"] > votes["Human"] else "Human"
# Wire the classifier into a minimal Gradio web UI: one text box in,
# one text label out, then start the app server.
iface = gr.Interface(
    fn=classify_text,
    outputs="text",
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to classify..."),
)
iface.launch()