iseddik committed
Commit: b6249dd
1 Parent: 4e5a464

feat: add use cases
Files changed:
- ANN_Tifinagh_MNIST_Model.py +130 -0
- CNN_Tifinagh_MNIST_Model.py +133 -0
- Gans_For_One_Simples_Tifinagh_MNIST.py +172 -0
- README.md +10 -0
- T_SNE_Tifinagh_MNIST.py +88 -0
ANN_Tifinagh_MNIST_Model.py
ADDED
@@ -0,0 +1,130 @@
"""# Classification by a classical ANN (MLP) - Tifinagh-MNIST

## The libraries we will use
"""

import os
import numpy as np
from matplotlib import pyplot as plt
import cv2
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from tensorflow.keras.utils import to_categorical

"""## Data loading and adaptation"""

def upload_data(path_name, number_of_class, number_of_images):
    # read number_of_images grayscale images from each class folder
    X_Data = []
    Y_Data = []
    for i in range(number_of_class):
        images = os.listdir(path_name + str(i))
        for j in range(number_of_images):
            img = cv2.imread(path_name + str(i) + '/' + images[j], 0)
            X_Data.append(img)
            Y_Data.append(i)
        print("> the " + str(i) + "-th folder was successfully loaded.", end='\r')
    return np.array(X_Data), np.array(Y_Data)

n_class = 33
n_train = 2000
n_test = 500

# here we load our data (Tifinagh data)
x_train, y_train = upload_data('/media/etabook/etadisk1/EducFils/PFE/DATA2/train_data/', n_class, n_train)
x_test, y_test = upload_data('/media/etabook/etadisk1/EducFils/PFE/DATA2/test_data/', n_class, n_test)

print("The x_train shape is:", x_train.shape)
print("The x_test shape is:", x_test.shape)
print("The y_train shape is:", y_train.shape)
print("The y_test shape is:", y_test.shape)

def plot_data(num=3):
    fig, axes = plt.subplots(1, num, figsize=(12, 8))
    for i in range(num):
        index = np.random.randint(len(x_test))
        axes[i].imshow(np.reshape(x_test[index], (28, 28)))
        axes[i].set_title('image label: %d' % y_test[index])
        axes[i].axis('off')
    plt.show()

plot_data(num=5)

num_classes = 33
size = 28

# flatten to 784-dimensional vectors and scale pixels to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = np.reshape(x_train, (x_train.shape[0], size*size))
x_test = np.reshape(x_test, (x_test.shape[0], size*size))
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot encoding)
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

"""## Define our neural network model (architecture)"""

model = Sequential()
model.add(Dense(512, input_shape=(size*size,), activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))

# no optimizer is passed explicitly, so Keras falls back to its default (RMSprop)
model.compile(loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

model.summary()

"""## Model prediction on test data before training"""

def plot_predictions(model, num=3):
    fig, axes = plt.subplots(1, num, figsize=(12, 8))
    for i in range(num):
        index = np.random.randint(len(x_test))
        pred = np.argmax(model.predict(np.reshape(x_test[index], (1, size*size))))
        axes[i].imshow(np.reshape(x_test[index], (size, size)))
        axes[i].set_title('Predicted label: ' + str(pred) + '\n/ true label: ' + str(np.argmax(y_test[index])))
        axes[i].axis('off')
    plt.show()

plot_predictions(model, num=5)

"""## Training"""

history = model.fit(x_train, y_train, batch_size=128, epochs=20, validation_data=(x_test, y_test))

"""## Model prediction on test data after training"""

plot_predictions(model, num=5)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

"""## Model history during training"""

with plt.xkcd():
    plt.plot(history.history['accuracy'], color='c')
    plt.plot(history.history['val_accuracy'], color='red')
    plt.title('Tifinagh-MNIST model accuracy')
    plt.legend(['acc', 'val_acc'])
    plt.savefig('acc_Tifinagh_MNIST.png')
    plt.show()

with plt.xkcd():
    plt.plot(history.history['loss'], color='c')
    plt.plot(history.history['val_loss'], color='red')
    plt.title('Tifinagh-MNIST model loss')
    plt.legend(['loss', 'val_loss'])
    plt.savefig('loss_Tifinagh_MNIST.png')
    plt.show()
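Note: the MLP script above trains and evaluates in one pass but never persists the network. A minimal sketch of saving the trained model and reloading it for single-image inference; the file name `ann_tifinagh_mnist.h5` is an assumption, not part of the commit:

```python
from keras.models import load_model
import numpy as np

# persist the trained MLP (hypothetical file name, not used in the original script)
model.save('ann_tifinagh_mnist.h5')

# reload it later and classify one flattened 28x28 test image
reloaded = load_model('ann_tifinagh_mnist.h5')
probs = reloaded.predict(x_test[0].reshape(1, 28 * 28))
print('predicted class:', np.argmax(probs))
```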
CNN_Tifinagh_MNIST_Model.py
ADDED
@@ -0,0 +1,133 @@
"""# CNN - Tifinagh-MNIST

## Libraries
"""

import os
import numpy as np
import cv2
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.utils import to_categorical
from keras.utils.vis_utils import plot_model

"""## Data loading and adaptation"""

def upload_data(path_name, number_of_class, number_of_images):
    X_Data = []
    Y_Data = []
    for i in range(number_of_class):
        images = os.listdir(path_name + str(i))
        for j in range(number_of_images):
            img = cv2.imread(path_name + str(i) + '/' + images[j], 0)
            X_Data.append(img)
            Y_Data.append(i)
        print("> the " + str(i) + "-th folder was successfully loaded.", end='\r')
    return np.array(X_Data), np.array(Y_Data)

n_class = 33
n_train = 2000
n_test = 500

# here we load our data (Tifinagh data, this time from Google Drive)
x_train, y_train = upload_data('drive/MyDrive/DATA2/train_data/', n_class, n_train)
x_test, y_test = upload_data('drive/MyDrive/DATA2/test_data/', n_class, n_test)

print("The x_train shape is:", x_train.shape)
print("The x_test shape is:", x_test.shape)
print("The y_train shape is:", y_train.shape)
print("The y_test shape is:", y_test.shape)

def plot_data(num=3):
    fig, axes = plt.subplots(1, num, figsize=(12, 8))
    for i in range(num):
        index = np.random.randint(len(x_test))
        axes[i].imshow(np.reshape(x_test[index], (28, 28)))
        axes[i].set_title('image label: %d' % y_test[index])
        axes[i].axis('off')
    plt.show()

plot_data(num=5)

# scale pixels to [0, 1] and add the channel axis Conv2D expects: (N, 28, 28) -> (N, 28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = np.reshape(x_test, (-1, 28, 28, 1))
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = to_categorical(y_train, n_class)
y_test = to_categorical(y_test, n_class)

"""## Architecture of the model"""

def define_model(input_size=(28, 28, 1)):
    inputs = Input(input_size)
    conv1 = Conv2D(128, 3, activation='relu', padding='same')(inputs)
    conv1 = Conv2D(128, 3, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv3 = Conv2D(64, 3, activation='relu', padding='same')(pool1)
    conv3 = Conv2D(64, 3, activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(32, 3, activation='relu', padding='same')(pool3)

    fltt = Flatten()(conv4)

    dan = Dense(33, activation='softmax')(fltt)

    model = Model(inputs=inputs, outputs=dan)

    model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])

    return model

model = define_model((28, 28, 1))
model.summary()

his = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_data=(x_test, y_test))

"""## Model prediction on test data after training"""

def plot_predictions(model, num=3):
    fig, axes = plt.subplots(1, num, figsize=(12, 8))
    for i in range(num):
        index = np.random.randint(len(y_test))
        pred = np.argmax(model.predict(np.reshape(x_test[index], (1, 28, 28, 1))))
        axes[i].imshow(np.reshape(x_test[index], (28, 28)))
        axes[i].set_title('Predicted label: ' + str(pred) + '\n/ true label: ' + str(np.argmax(y_test[index])))
        axes[i].axis('off')
    plt.show()

plot_predictions(model, num=5)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

with plt.xkcd():
    plt.plot(his.history['accuracy'], color='c')
    plt.plot(his.history['val_accuracy'], color='red')
    plt.title('Tifinagh-MNIST model accuracy')
    plt.legend(['acc', 'val_acc'])
    plt.savefig('acc_Tifinagh_MNIST_cnn.png')
    plt.show()

with plt.xkcd():
    plt.plot(his.history['loss'], color='c')
    plt.plot(his.history['val_loss'], color='red')
    plt.title('Tifinagh-MNIST model loss')
    plt.legend(['loss', 'val_loss'])
    plt.savefig('loss_Tifinagh_MNIST_cnn.png')
    plt.show()
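Note: the CNN script imports `plot_model` but never calls it. A minimal sketch of the presumably intended usage (requires pydot and Graphviz installed; the output file name is an assumption):

```python
# render the architecture returned by define_model() to an image file
plot_model(model, to_file='cnn_tifinagh_mnist.png', show_shapes=True)
```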
Gans_For_One_Simples_Tifinagh_MNIST.py
ADDED
@@ -0,0 +1,172 @@
"""# GANs for one Tifinagh-MNIST letter

## The libraries we will use
"""

import os
import cv2
from numpy import array
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy import vstack
from numpy.random import randn
from numpy.random import randint
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Dropout
from matplotlib import pyplot

n_class = 1  # number of classes
n_train_Tifinagh_mnist = 2000

def upload_data_Tfinagh_MNIST(path_name, num_of_class, number_of_images):
    # load every image of a single class (folder) as grayscale
    X_Data = []
    images = os.listdir(path_name + str(num_of_class))
    for j in range(len(images)):
        img = cv2.imread(path_name + str(num_of_class) + '/' + images[j], 0)
        X_Data.append(img)
    return array(X_Data)

def define_discriminator(in_shape=(28, 28, 1)):
    model = Sequential()
    model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

def define_generator(latent_dim):
    model = Sequential()
    n_nodes = 128 * 7 * 7
    model.add(Dense(n_nodes, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((7, 7, 128)))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))  # 7x7 -> 14x14
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))  # 14x14 -> 28x28
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(1, (7, 7), activation='sigmoid', padding='same'))
    return model

def define_gan(g_model, d_model):
    d_model.trainable = False  # freeze the discriminator while training the generator
    model = Sequential()
    model.add(g_model)
    model.add(d_model)
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model

def load_real_samples(num_class):
    trainX = upload_data_Tfinagh_MNIST('drive/MyDrive/DATA2/train_data/', num_class, n_train_Tifinagh_mnist)
    X = expand_dims(trainX, axis=-1)
    X = X.astype('float32')
    X = X / 255.0
    return X

def generate_real_samples(dataset, n_samples):
    ix = randint(0, dataset.shape[0], n_samples)
    X = dataset[ix]
    y = ones((n_samples, 1))  # label 1 = real
    return X, y

def generate_latent_points(latent_dim, n_samples):
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input

def generate_fake_samples(g_model, latent_dim, n_samples):
    x_input = generate_latent_points(latent_dim, n_samples)
    X = g_model.predict(x_input)
    y = zeros((n_samples, 1))  # label 0 = fake
    return X, y

def save_plot(examples, epoch, n=10):
    for i in range(n * n):
        pyplot.subplot(n, n, 1 + i)
        pyplot.axis('off')
        pyplot.imshow(examples[i, :, :, 0], cmap='gray_r')
    filename = 'generated_plot_e%03d.png' % (epoch+1)
    pyplot.savefig(filename)
    pyplot.close()

def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=100):
    X_real, y_real = generate_real_samples(dataset, n_samples)
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    x_fake, y_fake = generate_fake_samples(g_model, latent_dim, n_samples)
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real*100, acc_fake*100))
    #save_plot(x_fake, epoch)
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)

def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=100, n_batch=128):
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    for i in range(n_epochs):
        for j in range(bat_per_epo):
            X_real, y_real = generate_real_samples(dataset, half_batch)
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))
            d_loss, _ = d_model.train_on_batch(X, y)
            X_gan = generate_latent_points(latent_dim, n_batch)
            y_gan = ones((n_batch, 1))  # flipped labels: the generator tries to make fakes look real
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            print('>%d, %d/%d, d=%.3f, g=%.3f' % (i+1, j+1, bat_per_epo, d_loss, g_loss))
        if (i+1) % 10 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)

# size of the latent space
latent_dim = 100
# create the discriminator
d_model = define_discriminator()
# create the generator
g_model = define_generator(latent_dim)
# create the gan
gan_model = define_gan(g_model, d_model)
# load image data (class folder 29)
dataset = load_real_samples(29)
# train the model
train(g_model, d_model, gan_model, dataset, latent_dim)

# sample 9 images from the trained generator
z = generate_latent_points(100, 9)
im = g_model.predict(z)

from matplotlib import pyplot as plt
plt.figure(figsize=(9, 9))
# reverse the default colormap so strokes render dark on a light background
reversed_map = plt.cm.get_cmap().reversed()
for i in range(9):
    plt.subplot(3, 3, i+1)
    plt.imshow(im[i, :, :, 0], cmap=reversed_map)
    plt.axis('off')
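Note: `summarize_performance` saves a generator checkpoint every ten epochs as `generator_model_%03d.h5`. A minimal sketch of sampling from such a checkpoint in a fresh session, assuming training ran the default 100 epochs so `generator_model_100.h5` exists:

```python
from keras.models import load_model
from numpy.random import randn

# reload the last checkpoint written by summarize_performance()
g_model = load_model('generator_model_100.h5')

# draw 9 latent vectors and generate 9 letter images of shape (9, 28, 28, 1)
latent_points = randn(100 * 9).reshape(9, 100)
images = g_model.predict(latent_points)
```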
README.md
ADDED
@@ -0,0 +1,10 @@
# Contribution to the creation, classification and generation of a new handwritten Tifinagh alphabet letters dataset using ANN, CNN and GAN

In this work, we present a new dataset called Tifinagh-MNIST: handwritten letters of the Tifinagh alphabetic script, which is used to write the Tamazight languages. The dataset contains 82,500 grayscale images of size 28 x 28 pixels belonging to 33 classes (letters), with 2,500 images per class. The training set comprises 66,000 images and the test set 16,500 images. Tifinagh-MNIST is intended for the development of AI tools that process handwritten Tifinagh characters. We also propose use cases of this corpus through neural network models for classification and data generation. The corpus will be made publicly available to promote the development of artificial intelligence solutions for processing handwritten Tamazight texts.

![Logo](https://media.licdn.com/dms/image/C4E22AQGE6ZG-gNJhXg/feedshare-shrink_800/0/1656066492444?e=1678924800&v=beta&t=6on0spkXPm5NK4kY4SFEttnzkUrII9OUfvHxWeM8zO0)
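A minimal sketch of loading the corpus and checking the counts stated above, assuming the `train_data/<class_id>/` and `test_data/<class_id>/` folder layout used by the scripts in this commit (the root path is hypothetical):

```python
import os

root = 'DATA2'  # hypothetical local path to the corpus
for split, expected in [('train_data', 66000), ('test_data', 16500)]:
    # each split holds 33 folders named 0..32, one per letter class
    total = sum(len(os.listdir(os.path.join(root, split, str(c)))) for c in range(33))
    print(split, ':', total, 'images (expected', expected, ')')
```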
T_SNE_Tifinagh_MNIST.py
ADDED
@@ -0,0 +1,88 @@
"""# Visualization of the Tifinagh-MNIST database using the t-SNE algorithm

## The libraries we will use
"""

import time
import os
import cv2
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns

"""## Data loading and adaptation"""

def upload_data(path_name, number_of_class, number_of_images):
    X_Data = []
    Y_Data = []
    for i in range(number_of_class):
        images = os.listdir(path_name + str(i))
        for j in range(number_of_images):
            img = cv2.imread(path_name + str(i) + '/' + images[j], 0)
            X_Data.append(img)
            Y_Data.append(i)
        print("> the " + str(i) + "-th folder was successfully loaded.", end='\r')
    return np.array(X_Data), np.array(Y_Data)

n_class = 33
n_train = 2000

x_data, y_data = upload_data('/media/etabook/etadisk1/EducFils/PFE/DATA2/train_data/', n_class, n_train)

# flatten to 784-dimensional vectors and scale pixels to [0, 1]
x_data = x_data.astype('float32')
x_data = np.reshape(x_data, (x_data.shape[0], 28*28))
x_data /= 255
print('x_data shape:', x_data.shape)
print(x_data.shape[0], 'data samples')

"""## Convert the images and label vector to a pandas DataFrame"""

feat_cols = ['pixel' + str(i) for i in range(x_data.shape[1])]
df = pd.DataFrame(x_data, columns=feat_cols)
df['y'] = y_data
df['label'] = df['y'].apply(lambda i: str(i))
x_data, y_data = None, None  # free the original arrays
print('Size of the dataframe: {}'.format(df.shape))
df.head()

"""## Displaying images from the DataFrame"""

np.random.seed(42)
rndperm = np.random.permutation(df.shape[0])

plt.gray()
fig = plt.figure(figsize=(18, 12))
for i in range(0, 15):
    ax = fig.add_subplot(3, 5, i+1, title="Letter: {}".format(str(df.loc[rndperm[i], 'label'])))
    ax.matshow(df.loc[rndperm[i], feat_cols].values.reshape((28, 28)).astype(float))
plt.show()

"""## Running the t-SNE algorithm"""

N = 50000
df_subset = df.loc[rndperm[:N], :].copy()
data_subset = df_subset[feat_cols].values

time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(data_subset)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time() - time_start))

"""## Visualisation"""

df_subset['tsne-2d-one'] = tsne_results[:, 0]
df_subset['tsne-2d-two'] = tsne_results[:, 1]
plt.figure(figsize=(16, 10))
sns.scatterplot(
    x="tsne-2d-one", y="tsne-2d-two",
    hue="y",
    palette=sns.color_palette("hls", 33),
    data=df_subset,
    legend="full",
    alpha=0.3
)
plt.show()
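Note: running t-SNE directly on the 784 raw pixel dimensions for 50,000 points is slow. A common variant, not part of the original script, first compresses the data with PCA before the t-SNE embedding:

```python
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# reduce the 784 pixel features to 50 principal components, then embed in 2-D;
# this is usually much faster and yields a similar layout
pca_result = PCA(n_components=50).fit_transform(data_subset)
tsne_results = TSNE(n_components=2, verbose=1, perplexity=40).fit_transform(pca_result)
```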