alokvtk committed on
Commit
b2e3534
1 Parent(s): 8c9ee68

Upload 15 files

BackPropogation.py ADDED
@@ -0,0 +1,53 @@
+ import numpy as np
+ from tqdm import tqdm
+ 
+ 
+ class BackPropogation:
+     def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
+         self.bias = 0
+         self.learning_rate = learning_rate
+         self.max_epochs = epochs
+         self.activation_function = activation_function
+ 
+     def activate(self, x):
+         # Map the weighted sum to a binary class label.
+         if self.activation_function == 'step':
+             return 1 if x >= 0 else 0
+         elif self.activation_function == 'sigmoid':
+             return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
+         elif self.activation_function == 'relu':
+             return 1 if max(0, x) >= 0.5 else 0
+ 
+     def fit(self, X, y):
+         n_features = X.shape[1]
+         self.weights = np.zeros(n_features)
+         for epoch in tqdm(range(self.max_epochs)):
+             for i in range(len(X)):
+                 inputs = X[i]
+                 target = y[i]
+                 weighted_sum = np.dot(inputs, self.weights) + self.bias
+                 prediction = self.activate(weighted_sum)
+ 
+                 # Calculating loss and updating weights.
+                 error = target - prediction
+                 self.weights += self.learning_rate * error * inputs
+                 self.bias += self.learning_rate * error
+ 
+             print(f"Updated Weights after epoch {epoch} with {self.weights}")
+         print("Training Completed")
+ 
+     def predict(self, X):
+         predictions = []
+         for i in range(len(X)):
+             inputs = X[i]
+             weighted_sum = np.dot(inputs, self.weights) + self.bias
+             prediction = self.activate(weighted_sum)
+             predictions.append(prediction)
+         return predictions
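For reference, a minimal usage sketch for this class (not part of the commit; the AND-gate data below is purely illustrative):

import numpy as np
from BackPropogation import BackPropogation

# Toy, linearly separable AND-gate dataset (illustrative only)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([0, 0, 0, 1])

model = BackPropogation(learning_rate=0.1, epochs=20, activation_function='step')
model.fit(X, y)          # applies the error-driven updates shown above
print(model.predict(X))  # should settle on [0, 0, 0, 1] for this data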
Lstm_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b03fc488fed00a614e9c9d85b4bfc4c3de4bf51f950ab3fdbc959cc8736f456c
+ size 2594296
Model_backprop.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08f18405b62db7924aebb1b734d1b1895d4b2a3b1f42b9b34651329488a80e1d
+ size 1896
Percep_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94098089d5f8b390533c214ddf2804469db9772089ac429c336a02f2d44927c6
+ size 1063
Perceptron.py ADDED
@@ -0,0 +1,40 @@
+ import numpy as np
+ from tqdm import tqdm
+ 
+ 
+ class Perceptron:
+ 
+     def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
+         self.bias = 0
+         self.learning_rate = learning_rate
+         self.max_epochs = epochs
+         self.activation_function = activation_function
+ 
+     def activate(self, x):
+         # Map the weighted sum to a binary class label.
+         if self.activation_function == 'step':
+             return 1 if x >= 0 else 0
+         elif self.activation_function == 'sigmoid':
+             return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
+         elif self.activation_function == 'relu':
+             return 1 if max(0, x) >= 0.5 else 0
+ 
+     def fit(self, X, y):
+         n_features = X.shape[1]
+         # Random integer initialisation, cast to float so the in-place updates below work.
+         self.weights = np.random.randint(n_features, size=n_features).astype(float)
+         for epoch in tqdm(range(self.max_epochs)):
+             for i in range(len(X)):
+                 inputs = X[i]
+                 target = y[i]
+                 weighted_sum = np.dot(inputs, self.weights) + self.bias
+                 prediction = self.activate(weighted_sum)
+ 
+                 # Perceptron learning rule: nudge weights and bias toward the target.
+                 error = target - prediction
+                 self.weights += self.learning_rate * error * inputs
+                 self.bias += self.learning_rate * error
+         print("Training Completed")
+ 
+     def predict(self, X):
+         predictions = []
+         for i in range(len(X)):
+             inputs = X[i]
+             weighted_sum = np.dot(inputs, self.weights) + self.bias
+             prediction = self.activate(weighted_sum)
+             predictions.append(prediction)
+         return predictions
SMSSpamCollection ADDED
The diff for this file is too large to render. See raw diff
 
app.py ADDED
@@ -0,0 +1,201 @@
+ import streamlit as st
+ import numpy as np
+ from PIL import Image
+ from tensorflow.keras.models import load_model
+ import joblib
+ from tensorflow.keras.preprocessing.text import Tokenizer
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
+ from tensorflow.keras.applications.inception_v3 import preprocess_input
+ from tensorflow.keras.datasets import imdb
+ 
+ import cv2
+ from BackPropogation import BackPropogation
+ from Perceptron import Perceptron
+ import tensorflow as tf
+ import pickle
+ from numpy import argmax
+ 
+ 
+ # Load saved models
+ image_model = load_model('tumor_detection_model.h5')
+ dnn_model = load_model('sms_spam_detection_dnnmodel.h5')
+ rnn_model = load_model('spam_detection_rnn_model.h5')
+ 
+ # Load the pickled models and tokenizer shipped with this repo
+ with open('Model_backprop.pkl', 'rb') as file:
+     backprop_model = pickle.load(file)
+ 
+ with open('Percep_model.pkl', 'rb') as file:
+     perceptron_model = pickle.load(file)
+ 
+ with open('tokeniser.pkl', 'rb') as file:
+     loaded_tokeniser = pickle.load(file)
+ 
+ lstm_model_path = 'Lstm_model.h5'
+ 
+ # Streamlit app
+ st.title("Classification")
+ 
+ # Sidebar
+ task = st.sidebar.selectbox("Select Task", ["Tumor Detection", "Sentiment Classification"])
+ tokeniser = tf.keras.preprocessing.text.Tokenizer()
+ max_length = 10
+ 
+ 
+ def predictdnn_spam(text):
+     sequence = loaded_tokeniser.texts_to_sequences([text])
+     padded_sequence = pad_sequences(sequence, maxlen=10)
+     prediction = dnn_model.predict(padded_sequence)[0][0]
+     if prediction >= 0.5:
+         return "not spam"
+     else:
+         return "spam"
+ 
+ 
+ def preprocess_imdbtext(text, maxlen=200, num_words=10000):
+     # Tokenizing the text (note: the tokenizer is fitted on the input itself)
+     tokenizer = Tokenizer(num_words=num_words)
+     tokenizer.fit_on_texts(text)
+ 
+     # Converting text to sequences
+     sequences = tokenizer.texts_to_sequences(text)
+ 
+     # Padding sequences to a fixed length
+     padded_sequences = pad_sequences(sequences, maxlen=maxlen)
+ 
+     return padded_sequences, tokenizer
+ 
+ 
+ def predict_sentiment_backprop(text, model):
+     preprocessed_text, _ = preprocess_imdbtext(text, 200)
+     prediction = model.predict(preprocessed_text)
+     return prediction
+ 
+ 
+ def preprocess_imdb_lstm(user_input, tokenizer, max_review_length=500):
+     # Tokenize and pad the user input
+     user_input_sequence = tokenizer.texts_to_sequences([user_input])
+     user_input_padded = pad_sequences(user_input_sequence, maxlen=max_review_length)
+     return user_input_padded
+ 
+ 
+ def predict_sentiment_lstm(model, user_input, tokenizer):
+     preprocessed_input = preprocess_imdb_lstm(user_input, tokenizer)
+     prediction = model.predict(preprocessed_input)
+     return prediction
+ 
+ 
+ def predict_sentiment_precep(user_input, num_words=1000, max_len=200):
+     # Encode the review with the IMDB word index and pad it (preprocessing only)
+     word_index = imdb.get_word_index()
+     input_sequence = [word_index[word] if word in word_index and word_index[word] < num_words else 0 for word in user_input.split()]
+     padded_sequence = pad_sequences([input_sequence], maxlen=max_len)
+     return padded_sequence
+ 
+ 
+ def preprocess_message_dnn(message, tokeniser, max_length):
+     # Tokenize and pad the input message
+     encoded_message = tokeniser.texts_to_sequences([message])
+     padded_message = tf.keras.preprocessing.sequence.pad_sequences(encoded_message, maxlen=max_length, padding='post')
+     return padded_message
+ 
+ 
+ def predict_rnnspam(message, tokeniser, max_length):
+     # Preprocess the message
+     processed_message = preprocess_message_dnn(message, tokeniser, max_length)
+ 
+     # Predict spam or ham
+     prediction = rnn_model.predict(processed_message)
+     if prediction >= 0.5:
+         return "Spam"
+     else:
+         return "Ham"
+ 
+ 
+ # Make a prediction for the CNN
+ def preprocess_image(image):
+     image = image.resize((299, 299))
+     image_array = np.array(image)
+     preprocessed_image = preprocess_input(image_array)
+     return preprocessed_image
+ 
+ 
+ def make_prediction_cnn(image, image_model):
+     img = image.resize((128, 128))
+     img_array = np.array(img)
+     img_array = img_array.reshape((1, img_array.shape[0], img_array.shape[1], img_array.shape[2]))
+ 
+     preprocessed_image = preprocess_input(img_array)
+     prediction = image_model.predict(preprocessed_image)
+ 
+     if prediction > 0.5:
+         st.write("Tumor Detected")
+     else:
+         st.write("No Tumor")
+ 
+ 
+ if task == "Sentiment Classification":
+     st.subheader("Choose Model")
+     model_choice = st.radio("Select Model", ["DNN", "RNN", "Perceptron", "Backpropagation", "LSTM"])
+ 
+     st.subheader("Text Input")
+ 
+     if model_choice == 'DNN':
+         text_input = st.text_area("Enter Text")
+         if st.button("Predict"):
+             if text_input:
+                 prediction_result = predictdnn_spam(text_input)
+                 st.write(f"The review's class is: {prediction_result}")
+             else:
+                 st.write("Enter a movie review")
+ 
+     elif model_choice == "RNN":
+         text_input = st.text_area("Enter Text")
+         if text_input:
+             prediction_result = predict_rnnspam(text_input, loaded_tokeniser, max_length=10)
+             if st.button("Predict"):
+                 st.write(f"The message is classified as: {prediction_result}")
+         else:
+             st.write("Please enter some text for prediction")
+ 
+     elif model_choice == "Perceptron":
+         text_input = st.text_area("Enter Text")
+         if st.button('Predict'):
+             processed_input = predict_sentiment_precep(text_input)
+             prediction = perceptron_model.predict(processed_input)[0]
+             sentiment = "Positive" if prediction == 1 else "Negative"
+             st.write(f"Predicted Sentiment: {sentiment}")
+ 
+     elif model_choice == "LSTM":
+         lstm_model = tf.keras.models.load_model(lstm_model_path)
+         text_input = st.text_area("Enter text for sentiment analysis:", "")
+         if st.button("Predict"):
+             # NOTE: this tokenizer is created fresh and is not fitted on the
+             # training vocabulary, so the encoded input will be largely empty.
+             tokenizer = Tokenizer(num_words=5000)
+             prediction = predict_sentiment_lstm(lstm_model, text_input, tokenizer)
+ 
+             result = "Negative" if prediction[0][0] < 0.5 else "Positive"
+             st.write(f"The message is classified as: {result}")
+ 
+     elif model_choice == "Backpropagation":
+         text_input = st.text_area("Enter Text")
+         if st.button('Predict'):
+             processed_input = predict_sentiment_precep(text_input)
+             prediction = backprop_model.predict(processed_input)[0]
+             sentiment = "Positive" if prediction == 1 else "Negative"
+             st.write(f"Predicted Sentiment: {sentiment}")
+ 
+ else:
+     st.subheader("Choose Model")
+     model_choice = st.radio("Select Model", ["CNN"])
+ 
+     st.subheader("Image Input")
+     image_input = st.file_uploader("Choose an image...", type="jpg")
+ 
+     if image_input is not None:
+         image = Image.open(image_input)
+         st.image(image, caption="Uploaded Image.", use_column_width=True)
+ 
+         # Preprocess the image
+         preprocessed_image = preprocess_image(image)
+ 
+         if st.button("Predict"):
+             if model_choice == "CNN":
+                 make_prediction_cnn(image, image_model)
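Assuming the committed model files sit next to app.py (the script loads them by relative filename), the app would typically be started with Streamlit's CLI:

streamlit run app.py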
chck.py ADDED
@@ -0,0 +1,80 @@
+ import streamlit as st
+ import numpy as np
+ from PIL import Image
+ from tensorflow.keras.models import load_model
+ from tensorflow.keras.preprocessing.text import Tokenizer
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
+ from tensorflow.keras.applications.inception_v3 import preprocess_input
+ import tensorflow as tf
+ import joblib
+ 
+ # Load saved models
+ image_model = load_model('tumor_detection_model.h5')
+ dnn_model = load_model('sms_spam_detection_dnnmodel.h5')
+ rnn_model = load_model('spam_detection_rnn_model.h5')
+ perceptron_model = joblib.load('imdb_perceptron_model.pkl')
+ backprop_model = joblib.load('backprop_model.pkl')
+ LSTM_model = load_model('imdb_LSTM.h5')
+ 
+ # Streamlit app
+ st.title("Classification")
+ 
+ # Sidebar
+ task = st.sidebar.selectbox("Select Task", ["Tumor Detection", "Sentiment Classification"])
+ 
+ def preprocess_message_dnn(message, tokeniser, max_length):
+     encoded_message = tokeniser.texts_to_sequences([message])
+     padded_message = pad_sequences(encoded_message, maxlen=max_length, padding='post')
+     return padded_message
+ 
+ def predict_dnnspam(message, tokeniser, max_length):
+     processed_message = preprocess_message_dnn(message, tokeniser, max_length)
+     prediction = dnn_model.predict(processed_message)
+     return "Spam" if prediction >= 0.5 else "Ham"
+ 
+ # Other prediction functions for sentiment analysis can follow a similar pattern
+ 
+ # Function for CNN prediction
+ def preprocess_image(image):
+     image = image.resize((299, 299))
+     image_array = np.array(image)
+     preprocessed_image = preprocess_input(image_array)
+     return preprocessed_image
+ 
+ def make_prediction_cnn(image, model):
+     img = image.resize((128, 128))
+     img_array = np.array(img)
+     img_array = img_array.reshape((1, img_array.shape[0], img_array.shape[1], img_array.shape[2]))
+     preprocessed_image = preprocess_input(img_array)
+     prediction = model.predict(preprocessed_image)
+     return "Tumor Detected" if prediction > 0.5 else "No Tumor"
+ 
+ if task == "Sentiment Classification":
+     st.subheader("Choose Model")
+     model_choice = st.radio("Select Model", ["DNN", "RNN", "Perceptron", "Backpropagation", "LSTM"])
+ 
+     st.subheader("Text Input")
+     text_input = st.text_area("Enter Text")
+ 
+     if st.button("Predict"):
+         if model_choice == "DNN":
+             # You need to define tokeniser and max_length for DNN model
+             prediction_result = predict_dnnspam(text_input, tokeniser, max_length)
+             st.write(f"The message is classified as: {prediction_result}")
+         # Other model choices should call respective prediction functions similarly
+ 
+ else:
+     st.subheader("Choose Model")
+     model_choice = st.radio("Select Model", ["CNN"])
+ 
+     st.subheader("Image Input")
+     image_input = st.file_uploader("Choose an image...", type="jpg")
+ 
+     if image_input is not None:
+         image = Image.open(image_input)
+         st.image(image, caption="Uploaded Image.", use_column_width=True)
+ 
+         if st.button("Predict"):
+             if model_choice == "CNN":
+                 prediction_result = make_prediction_cnn(image, image_model)
+                 st.write(prediction_result)
imdb_LSTM.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05b99b60023dd89e8eaa59c6713aa7e505c248912d15c58b8737e84bcdd35e7f
+ size 2593696
lstm.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b03fc488fed00a614e9c9d85b4bfc4c3de4bf51f950ab3fdbc959cc8736f456c
+ size 2594296
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ opencv-python
+ streamlit
+ Pillow
+ tensorflow
+ numpy
+ tqdm
+ scikit-learn
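These dependencies would normally be installed before launching the app, for example:

pip install -r requirements.txt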
sms_spam_detection_dnnmodel.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98a69a1ccd2e7048bb72447cff024b354fa7cdec602de3d0b31f6129963951f9
+ size 3160600
spam_detection_rnn_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e609428e9471fe8e79de5acf38345621a7830dac2487aa5759d7c7f2982ad8b9
+ size 2271056
tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:531dafe93d5f15a108f6516d069d6ee8c8245965da619c6290c5bcc9d877cb84
+ size 5412642
tumor_detection_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:180027aca8773916e489be80ab2546c148857c9af4db0e39c6a31a5f28cdcc52
+ size 391814584