import streamlit as st
import requests
from bs4 import BeautifulSoup
from PIL import Image
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
import nltk
import re
import string
import pickle

from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# Point NLTK at the corpora bundled with the project (tokenizer, stop words, WordNet).
nltk.data.path.append('/Users/kritsadakruapat/Desktop/Political-Fake-News-Detector-NLP/Fake_News_Detection/nltk_data')
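
# Fallback sketch (an addition, not part of the original script): if the bundled
# nltk_data directory is missing, download the required resources once so
# word_tokenize, stopwords, and WordNetLemmatizer below do not raise LookupError.
for _resource, _path in [('punkt', 'tokenizers/punkt'),
                         ('stopwords', 'corpora/stopwords'),
                         ('wordnet', 'corpora/wordnet')]:
    try:
        nltk.data.find(_path)
    except LookupError:
        nltk.download(_resource, quiet=True)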


# Strip punctuation and any remaining non-word characters.
def remove_punctuation(text):
    text = re.sub(f"[{string.punctuation}]", "", text)
    text = re.sub(r"[^\w\s]", "", text)  # catch symbols not covered by string.punctuation
    return text


stop_words = set(stopwords.words('english'))


def remove_stopwords(text):
    word_tokens = word_tokenize(text)
    filtered_text = [word for word in word_tokens if word.lower() not in stop_words]
    return ' '.join(filtered_text)


lemmatizer = WordNetLemmatizer()


def lemmatize_text(text):
    word_tokens = word_tokenize(text)
    lemmatized_tokens = [lemmatizer.lemmatize(word.lower()) for word in word_tokens if word.lower() not in stop_words]
    return ' '.join(lemmatized_tokens)


# Full preprocessing pipeline: lowercase, strip punctuation, drop stop words,
# lemmatize, then convert to a padded integer sequence for the LSTM.
# `tokenizer` and MAX_SEQUENCE_LENGTH are assigned further down, before this
# function is ever called.
def preprocess_text(text):
    text = text.lower()
    text = remove_punctuation(text)
    text = remove_stopwords(text)
    text = lemmatize_text(text)
    sequences = tokenizer.texts_to_sequences([text])
    padded_sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH, padding='post')
    return padded_sequences


# Cache the trained LSTM model so it is loaded only once per session.
@st.cache_resource
def load_model():
    return tf.keras.models.load_model('model5.h5')


model = load_model()


# Cache the fitted Keras tokenizer that pairs with the trained model.
@st.cache_resource
def load_tokenizer():
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    return tokenizer


tokenizer = load_tokenizer()


MAX_SEQUENCE_LENGTH = 200  # must match the sequence length used when training the model
THRESHOLD = 0.7            # probabilities above this are labelled fake


st.title("📰 US Political Fake News Text Detector Using an LSTM")
st.write("Detect whether a piece of news is fake or real based on its content. Enter a URL to analyze its authenticity or test with a sample text.")


image = Image.open('list.png')
st.image(image, caption='Source: https://en.wikipedia.org/wiki/List_of_fake_news_websites', use_column_width=True)


st.title("🔗 Example Fake News Articles")
st.markdown("[Link 1](https://newsexaminer.net/politics/democratic/trump-democrats-face-different-political-landscape-ahead-of-midterms/)")
st.markdown("[Link 2](https://newsexaminer.net/robert-f-kennedy-jr-suspends-2024-presidential-campaign-endorses-donald-trump/)")
st.markdown("[Link 3](https://newsexaminer.net/trumps-fiery-response-to-harris-dnc-speech-a-social-media-frenzy/)")


st.title("🌐 Analyze News from a URL")
url = st.text_input("Enter the URL of the news article you want to analyze:")


# Fetch the article and return the first 1000 characters of its visible text.
def scrape_text_from_url(url):
    try:
        response = requests.get(url, timeout=10)  # timeout keeps the app from hanging on slow sites
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Remove script and style tags so only readable text remains.
        for script in soup(["script", "style"]):
            script.extract()

        text = soup.get_text(separator="\n").strip()
        return text[:1000]
    except requests.exceptions.RequestException as e:
        return f"Error scraping the URL: {e}"


# Run the model on one article and report the fake-news probability.
def predict_with_threshold(text):
    preprocessed_text = preprocess_text(text)

    prediction = model.predict(preprocessed_text)
    raw_prediction = prediction[0][0]

    st.write(f"Raw model prediction: {raw_prediction}")

    fake_prob = raw_prediction * 100

    if raw_prediction > THRESHOLD:
        st.write(f"⚠️ Potential Fake News Probability: {fake_prob:.2f}%")
        st.write("The news article is likely Fake.")
    else:
        st.write(f"✅ Potential Fake News Probability: {fake_prob:.2f}%")
        st.write("The news article is likely Real.")


if url:
    with st.spinner("Scraping the text..."):
        scraped_text = scrape_text_from_url(url)
        if "Error" in scraped_text:
            st.error(scraped_text)
        else:
            st.subheader("📝 Scraped Text:")
            st.write(scraped_text)

            # Show how much text was scraped before running the prediction.
            token_count = len(scraped_text.split())
            st.write(f"📊 Word Count: {token_count} words")

            predict_with_threshold(scraped_text)
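

# Hedged addition (not in the original script): the intro text above also
# mentions testing with a sample text, so this sketch adds a paste-in box that
# feeds the same predict_with_threshold flow. Drop it if URL-only analysis is intended.
st.title("✍️ Analyze Pasted Text")
sample_text = st.text_area("Or paste the article text here:")
if sample_text:
    predict_with_threshold(sample_text)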