"""Gradio app that labels a passage as human-written or AI-generated.

A ModernBERT-base encoder with a 41-way sequence-classification head
(40 AI model families plus one "human" class) is loaded from a local
fine-tuned checkpoint and exposed through a small Gradio Blocks UI.
"""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Local path of the fine-tuned state dict for the classification model.
MODEL_PATH = "modernbert.bin"

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
model = AutoModelForSequenceClassification.from_pretrained(
    "answerdotai/ModernBERT-base", num_labels=41
)
# weights_only=True restricts unpickling to plain tensors, preventing
# arbitrary code execution from a tampered checkpoint (torch >= 1.13).
model.load_state_dict(
    torch.load(MODEL_PATH, map_location=device, weights_only=True)
)
model.to(device)
model.eval()

# Class index -> generator name. Index 24 ("human") marks non-AI text.
label_mapping = {
    0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B',
    5: 'bloom_7b', 6: 'bloomz', 7: 'cohere', 8: 'davinci',
    9: 'dolly', 10: 'dolly-v2-12b', 11: 'flan_t5_base',
    12: 'flan_t5_large', 13: 'flan_t5_small', 14: 'flan_t5_xl',
    15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
    18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
    22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b',
    26: 'llama3-8b', 27: 'mixtral-8x7b', 28: 'opt_1.3b',
    29: 'opt_125m', 30: 'opt_13b', 31: 'opt_2.7b', 32: 'opt_30b',
    33: 'opt_350m', 34: 'opt_6.7b', 35: 'opt_iml_30b',
    36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
    39: 'text-davinci-002', 40: 'text-davinci-003',
}

# Index of the "human" class in label_mapping.
HUMAN_LABEL_ID = 24


def classify_text(text):
    """Classify *text* and return a Markdown-formatted verdict.

    Args:
        text: Passage to analyze. Blank/whitespace-only input is skipped.

    Returns:
        A Markdown string with the human/AI verdict and, for AI text,
        the name of the most likely generator model; "----" for blank
        input (placeholder shown while the textbox is empty).
    """
    if not text.strip():
        return "----"

    # Tokenize and move tensors to the model's device. truncation=True
    # clips inputs to the tokenizer's model_max_length.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    inputs = {key: value.to(device) for key, value in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)

    probabilities = torch.softmax(outputs.logits, dim=1)[0]
    predicted_class = torch.argmax(probabilities).item()
    # Confidence of the single most probable class, as a percentage.
    # NOTE(review): for AI text this is the probability of the specific
    # generator, not of "any AI" (which would be 100 - P(human)).
    confidence = probabilities[predicted_class].item() * 100

    if predicted_class == HUMAN_LABEL_ID:
        prediction_label = f"✅ - The text is **{confidence:.2f}%** likely Human written."
        model_info = ""
    else:
        prediction_label = f"🤖 - The text is **{confidence:.2f}%** likely AI generated."
        model_info = f"**Identified AI Model:** {label_mapping[predicted_class]}"

    result_message = f"**Result:**\n\n{prediction_label}"
    if model_info:
        result_message += f"\n\n{model_info}"
    return result_message


title = "AI Text Detector"

description = """ This tool uses the **ModernBERT** model to identify whether a given text was written by a human or generated by artificial intelligence (AI).
Human Verification: Human-written content is clearly marked.
🔍 Model Detection: Can identify content from over 40 AI models.
📈 Accuracy: Works best with longer texts for improved precision.

Paste your text below to analyze its origin. """

bottom_text = "**Developed by SzegedAI**"

# Custom CSS: green-accented boxes, centered layout, Roboto Mono font.
iface = gr.Blocks(css="""
    @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');

    #text_input_box {
        border-radius: 10px;
        border: 2px solid #4CAF50;
        font-size: 18px;
        padding: 15px;
        margin-bottom: 20px;
        width: 60%;
        box-sizing: border-box;
        margin: auto;
    }

    .form.svelte-633qhp {
        background: none;
        border: none;
        box-shadow: none;
    }

    #result_output_box {
        border-radius: 10px;
        border: 2px solid #4CAF50;
        font-size: 18px;
        padding: 15px;
        margin-top: 20px;
        width: 40%;
        box-sizing: border-box;
        text-align: center;
        margin: auto;
    }

    @media (max-width: 768px) {
        #result_output_box {
            width: 80%;
        }
    }

    body {
        font-family: 'Roboto Mono', sans-serif !important;
        padding: 20px;
        display: block;
        justify-content: center;
        align-items: center;
        height: 100vh;
        overflow-y: auto;
    }

    .gradio-container {
        border: 1px solid #4CAF50;
        border-radius: 15px;
        padding: 30px;
        box-shadow: 0px 0px 10px rgba(0,255,0,0.6);
        max-width: 600px;
        margin: auto;
    }

    h1 {
        text-align: center;
        font-size: 32px;
        font-weight: bold;
        margin-bottom: 30px;
    }

    .highlight-human {
        color: #4CAF50;
        font-weight: bold;
        background: rgba(76, 175, 80, 0.2);
        padding: 5px;
        border-radius: 8px;
    }

    .highlight-ai {
        color: #FF5733;
        font-weight: bold;
        background: rgba(255, 87, 51, 0.2);
        padding: 5px;
        border-radius: 8px;
    }

    #bottom_text {
        text-align: center;
        margin-top: 50px;
        font-weight: bold;
        font-size: 20px;
    }

    .block.svelte-11xb1hd{
        background: none !important;
    }
""")

with iface:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)
    text_input = gr.Textbox(
        label="",
        placeholder="Type or paste your content here...",
        elem_id="text_input_box",
        lines=5,
    )
    result_output = gr.Markdown(
        "**Results will appear here...**", elem_id="result_output_box"
    )
    # Re-classify live on every edit of the textbox.
    text_input.change(classify_text, inputs=text_input, outputs=result_output)
    gr.Markdown(bottom_text, elem_id="bottom_text")

# Guard so importing this module (e.g. for testing) does not start the
# server; share=True additionally publishes a temporary public URL.
if __name__ == "__main__":
    iface.launch(share=True)