Upload 10 files
- .gitattributes +1 -0
- BP.pkl +3 -0
- CN.h5 +3 -0
- DP.h5 +3 -0
- LS.keras +3 -0
- PP.pkl +3 -0
- RN.h5 +3 -0
- Streamlit.py +162 -0
- imdb_backpropogation.py +21 -0
- imdb_perceptron.py +22 -0
- requirements.txt +5 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+LS.keras filter=lfs diff=lfs merge=lfs -text
BP.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff7a191c4476cc22a864ea6db1b84c43f72d25a45ad4fb60a9e482c6b261f1bf
+size 4300
CN.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:044f4a3e7ce2ec824e453b130efa2ca2174aa6505d6c7f41a6a3455f5185eb48
+size 391811360
DP.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff1c90c2ee58ab7499b7c136dfc284e403a19fb0091672e8b51e6ffc5bde7578
+size 10735120
LS.keras
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:705ae60751f9f288daf9486f5b3535e437fd7aabb96c5b79f908e7f5e68c9b02
+size 4194296
PP.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d490b9361db03dbd503b6a1424976d05a9865eefe505327b2ad342b989737eba
+size 2264
RN.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58d8eb5511fb98d6791afb55d1d4ce2700cc80272bf8f3117f4b1587dade831c
+size 1548440
Streamlit.py
ADDED
@@ -0,0 +1,162 @@
+import streamlit as st
+import numpy as np
+from PIL import Image
+from tensorflow.keras.models import load_model
+from tensorflow.keras.datasets import imdb
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+import pickle
+
+# Load word index for Sentiment Classification
+word_to_index = imdb.get_word_index()
+
+# Function to perform sentiment classification
+def sentiment_classification(new_review_text, model):
+    max_review_length = 500
+    new_review_tokens = [word_to_index.get(word, 0) for word in new_review_text.split()]
+    new_review_tokens = pad_sequences([new_review_tokens], maxlen=max_review_length)
+    prediction = model.predict(new_review_tokens)
+    if isinstance(prediction, list):
+        prediction = prediction[0]
+    # Reduce the model output to a single score before thresholding.
+    return "Positive" if float(np.ravel(prediction)[0]) > 0.5 else "Negative"
+
+# Function to perform tumor detection
+def tumor_detection(img, model):
+    img = Image.open(img)
+    img = img.resize((128, 128))
+    img = np.array(img)
+    input_img = np.expand_dims(img, axis=0)
+    res = model.predict(input_img)
+    # Threshold the sigmoid output rather than relying on array truthiness.
+    return "Tumor Detected" if float(np.ravel(res)[0]) > 0.5 else "No Tumor"
+
+# Streamlit App
+st.title("Deep Prediction Hub")
+st.sidebar.header("Options")
+
+# Choose between tasks
+task = st.sidebar.radio("Select Task", ("Sentiment Classification", "Tumor Detection"))
+
+if task == "Sentiment Classification":
+    st.subheader("Sentiment Classification")
+    # Input box for new review
+    new_review_text = st.text_area("Enter a New Review:", value="")
+    if st.button("Submit") and not new_review_text.strip():
+        st.warning("Please enter a review.")
+
+    if new_review_text.strip():
+        st.subheader("Choose Model for Sentiment Classification")
+        model_option = st.selectbox("Select Model", ("Perceptron", "Backpropagation", "DNN", "RNN", "LSTM"))
+
+        # Load the selected model; filenames match the files included in this upload.
+        if model_option == "Perceptron":
+            with open('PP.pkl', 'rb') as file:
+                model = pickle.load(file)
+        elif model_option == "Backpropagation":
+            with open('BP.pkl', 'rb') as file:
+                model = pickle.load(file)
+        elif model_option == "DNN":
+            model = load_model('DP.h5')
+        elif model_option == "RNN":
+            model = load_model('RN.h5')
+        elif model_option == "LSTM":
+            model = load_model('LS.keras')
+
+        if st.button("Classify Sentiment"):
+            result = sentiment_classification(new_review_text, model)
+            st.subheader("Sentiment Classification Result")
+            st.write(f"**{result}**")
+
+elif task == "Tumor Detection":
+    st.subheader("Tumor Detection")
+    uploaded_file = st.file_uploader("Choose a tumor image...", type=["jpg", "jpeg", "png"])
+
+    if uploaded_file is not None:
+        # Load the tumor detection model
+        model = load_model('CN.h5')
+        st.image(uploaded_file, caption="Uploaded Image.", use_column_width=False, width=200)
+        st.write("")
+
+        if st.button("Detect Tumor"):
+            result = tumor_detection(uploaded_file, model)
+            st.subheader("Tumor Detection Result")
+            st.write(f"**{result}**")
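Because Streamlit reruns the whole script on every widget interaction, each click above reloads the selected model from disk, and CN.h5 alone is nearly 400 MB. Below is a minimal sketch, not part of this upload, of how loading could be memoized with Streamlit's st.cache_resource decorator; the helper name get_model and the option-to-filename mapping are assumptions that mirror the filenames used in Streamlit.py.

import pickle
import streamlit as st
from tensorflow.keras.models import load_model

# Hypothetical helper (not in the upload): cache each loaded model so Streamlit
# reruns reuse the in-memory object instead of reloading the file from disk.
@st.cache_resource
def get_model(option):
    keras_files = {"DNN": "DP.h5", "RNN": "RN.h5", "LSTM": "LS.keras", "CNN": "CN.h5"}
    pickle_files = {"Perceptron": "PP.pkl", "Backpropagation": "BP.pkl"}
    if option in keras_files:
        return load_model(keras_files[option])
    with open(pickle_files[option], "rb") as f:
        return pickle.load(f)

A call such as model = get_model(model_option) could then replace the if/elif chain in the sentiment branch.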
imdb_backpropogation.py
ADDED
@@ -0,0 +1,21 @@
+import pickle
+
+from tensorflow.keras.datasets import imdb
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+from sklearn.metrics import accuracy_score
+
+from BackPropogation import BackPropogation
+
+# Load the IMDB dataset, keeping only the 5,000 most frequent words.
+top_words = 5000
+(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
+
+# Pad every review to a fixed length of 500 tokens.
+max_review_length = 500
+X_train = pad_sequences(X_train, maxlen=max_review_length)
+X_test = pad_sequences(X_test, maxlen=max_review_length)
+
+# Train the custom backpropagation model and evaluate it on the test split.
+backprop = BackPropogation(epochs=100, learning_rate=0.01, activation_function='sigmoid')
+backprop.fit(X_train, y_train)
+pred = backprop.predict(X_test)
+print(f"Accuracy : {accuracy_score(pred, y_test)}")
+
+# Serialize the trained model (machine-specific absolute path from the original script).
+with open(r'C:\Users\Sreya\Desktop\deeplearning\Predictionsdl\Deep-Prediction-Hub\BP.pkl', 'wb') as file:
+    pickle.dump(backprop, file)
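The BackPropogation class is imported from a local BackPropogation.py module that is not part of this upload, so the script above assumes its interface: a constructor taking epochs, learning_rate, and activation_function, plus fit and predict methods. The following is a hypothetical minimal sketch of such a class, written here as a single sigmoid unit trained by gradient descent; the real module may differ.

import numpy as np

class BackPropogation:
    # Hypothetical stand-in for the missing BackPropogation.py module:
    # a single sigmoid unit trained with batch gradient descent.
    def __init__(self, epochs=100, learning_rate=0.01, activation_function='sigmoid'):
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.activation_function = activation_function  # only 'sigmoid' is sketched here

    @staticmethod
    def _sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def fit(self, X, y):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        self.weights = np.zeros(X.shape[1])
        self.bias = 0.0
        for _ in range(self.epochs):
            output = self._sigmoid(X @ self.weights + self.bias)
            error = output - y
            # Gradient of the logistic (cross-entropy) loss w.r.t. weights and bias.
            self.weights -= self.learning_rate * (X.T @ error) / len(y)
            self.bias -= self.learning_rate * error.mean()
        return self

    def predict(self, X):
        X = np.asarray(X, dtype=float)
        return (self._sigmoid(X @ self.weights + self.bias) > 0.5).astype(int)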
imdb_perceptron.py
ADDED
@@ -0,0 +1,22 @@
+import pickle
+
+from tensorflow.keras.datasets import imdb
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+from sklearn.metrics import accuracy_score
+
+from Perceptron import Perceptron
+
+# Load the IMDB dataset, keeping only the 5,000 most frequent words.
+top_words = 5000
+(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
+
+# Pad every review to a fixed length of 500 tokens.
+max_review_length = 500
+X_train = pad_sequences(X_train, maxlen=max_review_length)
+X_test = pad_sequences(X_test, maxlen=max_review_length)
+
+# Train the custom perceptron model and evaluate it on the test split.
+percep = Perceptron(epochs=100)
+percep.fit(X_train, y_train)
+pred = percep.predict(X_test)
+print(f"Accuracy : {accuracy_score(pred, y_test)}")
+
+# Serialize the trained model (machine-specific absolute path from the original script).
+with open(r'C:\Users\Sreya\Desktop\deeplearning\Predictionsdl\Deep-Prediction-Hub\PP.pkl', 'wb') as file:
+    pickle.dump(percep, file)
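Similarly, Perceptron comes from a local Perceptron.py module that is not included in this upload; the script only relies on an epochs keyword argument plus fit and predict. A hypothetical minimal sketch using the classic perceptron update rule on the 0/1 IMDB labels follows; the actual module may differ.

import numpy as np

class Perceptron:
    # Hypothetical stand-in for the missing Perceptron.py module:
    # a classic perceptron with a unit-step activation.
    def __init__(self, epochs=100, learning_rate=1.0):
        self.epochs = epochs
        self.learning_rate = learning_rate

    def fit(self, X, y):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)  # IMDB labels are 0 (negative) or 1 (positive)
        self.weights = np.zeros(X.shape[1])
        self.bias = 0.0
        for _ in range(self.epochs):
            for xi, target in zip(X, y):
                predicted = 1.0 if xi @ self.weights + self.bias > 0 else 0.0
                update = self.learning_rate * (target - predicted)
                # Perceptron rule: only misclassified samples change the weights.
                self.weights += update * xi
                self.bias += update
        return self

    def predict(self, X):
        X = np.asarray(X, dtype=float)
        return (X @ self.weights + self.bias > 0).astype(int)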
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+streamlit
+numpy
+Pillow
+tensorflow
+tqdm
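With these dependencies installed, the app can presumably be launched locally with streamlit run Streamlit.py. Note that the two training scripts above also import scikit-learn (for accuracy_score) and the local Perceptron/BackPropogation modules, neither of which is listed here; they appear to be intended for offline training rather than for running on the Space.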