Columns: kernel_id (int64, values 24.2k to 23.3M); prompt (string, lengths 8 to 1.85M); completion (string, lengths 1 to 182k); comp_name (string, lengths 5 to 57)
18,968,817
outlier = train_data.loc[train_data.target < 1.0] print(outlier)<drop_column>
submission = pd.DataFrame({'ImageId' : range(1,28001), 'Label' : list(subs)}) submission.head(10) submission.shape
Digit Recognizer
18,968,817
<prepare_x_and_y><EOS>
submission.to_csv("submission1.csv", index = False )
Digit Recognizer
18,871,430
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
!nvidia-smi
Digit Recognizer
18,871,430
params = { 'n_estimators' : [1500, 2000, 2500], 'learning_rate' : [0.01, 0.02] } xgb = XGBRegressor( objective = 'reg:squarederror', subsample = 0.8, colsample_bytree = 0.8, learning_rate = 0.01, tree_method = 'gpu_hist') grid_search = GridSearchCV(xgb, param_grid = params, scoring = 'neg_root_mean_squared_error', n_jobs = -1, verbose = 10) grid_search.fit(train_data, y_train) print(' Best estimator:') print(grid_search.best_estimator_) print(' Best score:') print(grid_search.best_score_) print(' Best hyperparameters:') print(grid_search.best_params_ )<train_model>
%matplotlib inline sns.set(style='white', context='notebook', palette='deep') np.random.seed(2 )
Digit Recognizer
18,871,430
clf = XGBRegressor( objective = 'reg:squarederror', subsample = 0.8, learning_rate = 0.02, max_depth = 7, n_estimators = 2500, tree_method = 'gpu_hist') clf.fit(train_data, y_train) y_pred_xgb = clf.predict(test_data) print(y_pred_xgb )<save_to_csv>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
18,871,430
solution = pd.DataFrame({"id": test_data.id, "target": y_pred_xgb}) solution.to_csv("solution.csv", index=False) print("saved successfully!")<install_modules>
y_train = train["label"] X_train = train.drop(labels=["label"], axis = 1 )
Digit Recognizer
18,871,430
!pip install ../input/efficientnet/efficientnet-1.0.0-py3-none-any.whl<import_modules>
(X_train1, y_train1), (X_test1, y_test1) = mnist.load_data() X_train1 = np.concatenate([X_train1, X_test1], axis=0) y_train1 = np.concatenate([y_train1, y_test1], axis=0) X_train1 = X_train1.reshape(-1, 28*28)
Digit Recognizer
18,871,430
import pandas as pd import tensorflow as tf import cv2 import glob from tqdm.notebook import tqdm import numpy as np import os import efficientnet.keras as efn from keras.layers import * from keras import Model import matplotlib.pyplot as plt import time<load_pretrained>
X_train = X_train/255. X_train1 = X_train1/255. test = test/255.
Digit Recognizer
18,871,430
detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.compat.v1.GraphDef() with tf.io.gfile.GFile('../input/mobilenet-face/frozen_inference_graph_face.pb', 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='')<set_options>
X_train = np.concatenate((X_train.values, X_train1)) y_train = np.concatenate((y_train, y_train1))
Digit Recognizer
18,871,430
cm = detection_graph.as_default() cm.__enter__()<prepare_x_and_y>
y_train = to_categorical(y_train, num_classes = 10 )
Digit Recognizer
18,871,430
config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True sess=tf.compat.v1.Session(graph=detection_graph, config=config) image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0') scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0' )<categorify>
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state = 2 )
Digit Recognizer
18,871,430
def get_img(images): global boxes,scores,num_detections im_heights,im_widths=[],[] imgs=[] for image in images: (im_height,im_width)=image.shape[:-1] imgs.append(image) im_heights.append(im_height) im_widths.append(im_width) imgs=np.array(imgs) (boxes, scores_)= sess.run([boxes_tensor, scores_tensor], feed_dict={image_tensor: imgs}) finals=[] for x in range(boxes.shape[0]): scores=scores_[x] max_=np.where(scores==scores.max())[0][0] box=boxes[x][max_] ymin, xmin, ymax, xmax = box (left, right, top, bottom)=(xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height) left, right, top, bottom = int(left), int(right), int(top), int(bottom) image=imgs[x] finals.append(cv2.cvtColor(cv2.resize(image[max([0,top-40]):bottom+80,max([0,left-40]):right+80],(240,240)), cv2.COLOR_BGR2RGB)) return finals def detect_video(video, start_frame): frame_count=10 capture = cv2.VideoCapture(video) v_len = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) frame_idxs = np.linspace(start_frame, v_len, frame_count, endpoint=False, dtype=int) imgs=[] i=0 for frame_idx in range(int(v_len)): ret = capture.grab() if not ret: print("Error grabbing frame %d from movie %s" % (frame_idx, video)) if frame_idx >= frame_idxs[i]: if frame_idx-frame_idxs[i]>20: return None ret, frame = capture.retrieve() if not ret or frame is None: print("Error retrieving frame %d from movie %s" % (frame_idx, video)) else: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) imgs.append(frame) i += 1 if i >= len(frame_idxs): break imgs=get_img(imgs) if len(imgs)<10: return None return np.hstack(imgs)<train_model>
print(f"Training shape {X_train.shape} Validation shape {X_val.shape}" )
Digit Recognizer
18,871,430
os.mkdir('./videos/') for x in tqdm(glob.glob('../input/deepfake-detection-challenge/test_videos/*.mp4')): try: filename=x.replace('../input/deepfake-detection-challenge/test_videos/','').replace('.mp4','.jpg') a=detect_video(x,0) if a is None: continue cv2.imwrite('./videos/'+filename,a) except Exception as err: print(err)<train_model>
model = Sequential() model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(BatchNormalization()) model.add(Conv2D(128,(3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(256, kernel_size=(3, 3), activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(512,(3, 3), activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(BatchNormalization()) model.add(Dense(256)) model.add(BatchNormalization()) model.add(Dense(128)) model.add(BatchNormalization()) model.add(Dense(10, activation='softmax'))
Digit Recognizer
18,871,430
os.mkdir('./videos_2/') for x in tqdm(glob.glob('../input/deepfake-detection-challenge/test_videos/*.mp4')): try: filename=x.replace('../input/deepfake-detection-challenge/test_videos/','').replace('.mp4','.jpg') a=detect_video(x,95) if a is None: continue cv2.imwrite('./videos_2/'+filename,a) except Exception as err: print(err)<choose_model_class>
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True) Image('model.png' )
Digit Recognizer
18,871,430
bottleneck = efn.EfficientNetB1(weights=None, include_top=False, pooling='avg') inp=Input((10,240,240,3)) x=TimeDistributed(bottleneck)(inp) x = LSTM(128)(x) x = Dense(64, activation='elu')(x) x = Dense(1, activation='sigmoid')(x)<define_variables>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
18,871,430
model=Model(inp,x) weights = ['../input/deepfake-20/saved-model-01-0.06.hdf5', '../input/deepfake-20/saved-model-02-0.05.hdf5', '../input/model-epoch-3/saved-model-03-0.06.hdf5', '../input/model-02/saved-model-01-0.06.hdf5']*2 sub_file = ['submission_'+str(i)+'.csv' for i in range(1,9)] video = ['./videos/']*4+['./videos_2/']*4 for xxxxx in range(8): start = time.time() model.load_weights(weights[xxxxx]) def get_brightness(img): return img/img.max() def process_img(img, flip=[False]*10): imgs=[] for x in range(10): if flip[x]: imgs.append(get_brightness(cv2.flip(img[:,x*240:(x+1)*240,:],1))) else: imgs.append(get_brightness(img[:,x*240:(x+1)*240,:])) return np.array(imgs) sample_submission = pd.read_csv("../input/deepfake-detection-challenge/sample_submission.csv") test_files=glob.glob(video[xxxxx]+'*.jpg') submission=pd.DataFrame() submission['filename']=os.listdir('../input/deepfake-detection-challenge/test_videos/') submission['label']=0.5 filenames=[] batch=[] batch1=[] batch2=[] batch3=[] preds=[] for x in test_files: img=process_img(cv2.cvtColor(cv2.imread(x),cv2.COLOR_BGR2RGB)) if img is None: continue batch.append(img) batch1.append(process_img(cv2.cvtColor(cv2.imread(x),cv2.COLOR_BGR2RGB),[True]*10)) batch2.append(process_img(cv2.cvtColor(cv2.imread(x),cv2.COLOR_BGR2RGB),[True,False]*5)) batch3.append(process_img(cv2.cvtColor(cv2.imread(x),cv2.COLOR_BGR2RGB),[False,True]*5)) filenames.append(x.replace(video[xxxxx],'').replace('.jpg','.mp4')) if len(batch)==16: preds+=((0.25*model.predict(np.array(batch))) +(0.25*model.predict(np.array(batch1))) +(0.25*model.predict(np.array(batch2))) +(0.25*model.predict(np.array(batch3)))).tolist() batch=[] batch1=[] batch2=[] batch3=[] if len(batch)!=0: preds+=((0.25*model.predict(np.array(batch))) +(0.25*model.predict(np.array(batch1))) +(0.25*model.predict(np.array(batch2))) +(0.25*model.predict(np.array(batch3)))).tolist() print(time.time()-start) new_preds=[] for x in preds: new_preds.append(x[0]) print(sum(new_preds)/len(new_preds)) for x,y in zip(new_preds,filenames): submission.loc[submission['filename']==y,'label']=min([max([0.05,x]),0.95]) submission.to_csv(sub_file[xxxxx], index=False)<set_options>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.2, min_lr=0.00001) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15) checkpoint = ModelCheckpoint(filepath='model.h5', monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True )
Digit Recognizer
18,871,430
!rm -r videos !rm -r videos_2<load_from_csv>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
18,871,430
df1 = pd.read_csv('submission_1.csv').set_index('filename').transpose().to_dict() df2 = pd.read_csv('submission_2.csv').set_index('filename').transpose().to_dict() df3 = pd.read_csv('submission_3.csv').set_index('filename').transpose().to_dict() df4 = pd.read_csv('submission_4.csv').set_index('filename').transpose().to_dict() df5 = pd.read_csv('submission_5.csv').set_index('filename').transpose().to_dict() df6 = pd.read_csv('submission_6.csv').set_index('filename').transpose().to_dict() df7 = pd.read_csv('submission_7.csv').set_index('filename').transpose().to_dict() df8 = pd.read_csv('submission_8.csv').set_index('filename').transpose().to_dict() filename = [] label = [] for i in df1.keys(): filename.append(i) a = [] if df1[i]['label']!=0.5: a.append(df1[i]['label']) if df2[i]['label']!=0.5: a.append(df2[i]['label']) if df3[i]['label']!=0.5: a.append(df3[i]['label']) if df4[i]['label']!=0.5: a.append(df4[i]['label']) if df5[i]['label']!=0.5: a.append(df5[i]['label']) if df6[i]['label']!=0.5: a.append(df6[i]['label']) if df7[i]['label']!=0.5: a.append(df7[i]['label']) if df8[i]['label']!=0.5: a.append(df8[i]['label']) if len(a)==0: label.append(0.5) else: label.append(min([max([0.05,sum(a)/len(a)]),0.95])) df = pd.DataFrame() df['filename'] = filename df['label'] = label print(np.array(df['label']).mean()) df.to_csv('submission.csv', index=False)<load_from_csv>
epochs = 50 batch_size = 128
Digit Recognizer
18,871,430
!rm submission_1.csv !rm submission_2.csv !rm submission_3.csv !rm submission_4.csv !rm submission_5.csv !rm submission_6.csv !rm submission_7.csv !rm submission_8.csv<set_options>
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, y_val), verbose=2, steps_per_epoch=X_train.shape[0]//batch_size, callbacks=[learning_rate_reduction, es, checkpoint] )
Digit Recognizer
18,871,430
%matplotlib inline warnings.filterwarnings('ignore') pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 200 )<load_from_csv>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
18,871,430
<count_missing_values><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("submission.csv",index=False )
Digit Recognizer
18,752,947
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
from matplotlib import pyplot as plt import os import scipy import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras import seaborn as sns from sklearn.model_selection import train_test_split import cv2
Digit Recognizer
18,752,947
app_train['TARGET'].value_counts()<define_variables>
main_path = "../input/digit-recognizer" train_df = pd.read_csv(os.path.join(main_path, "train.csv")) test_df = pd.read_csv(os.path.join(main_path, "test.csv"))
Digit Recognizer
18,752,947
columns = ['AMT_INCOME_TOTAL','AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE', 'DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_ID_PUBLISH', 'DAYS_REGISTRATION', 'DAYS_LAST_PHONE_CHANGE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT', 'EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR'] show_hist_by_target(app_train, columns )<feature_engineering>
x_train = train_df.drop(labels=["label"], axis=1) y_train = train_df["label"] y_train.head()
Digit Recognizer
18,752,947
app_train['DAYS_BIRTH']=abs(app_train['DAYS_BIRTH']) app_train['DAYS_BIRTH'].corr(app_train['TARGET'] )<count_missing_values>
x_train = x_train.to_numpy() / 255.0 x_test = test_df.to_numpy() / 255.0 x_train = x_train.reshape(-1, 28, 28, 1) x_test = x_test.reshape(-1, 28, 28, 1) y_train = to_categorical(y_train) plt.imshow(x_train[125, :, :, 0])
Digit Recognizer
18,752,947
app_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].isnull().sum()<count_values>
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.15, random_state=2) datagen = ImageDataGenerator( rotation_range=27, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.3, zoom_range=0.2 )
Digit Recognizer
18,752,947
app_train['EXT_SOURCE_3'].value_counts(dropna=False )<count_values>
model = Sequential() model.add(Conv2D(filters=64, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(Conv2D(filters=64, kernel_size=(5,5), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10, activation='softmax')) model.summary()
Digit Recognizer
18,752,947
cond_1 = (app_train['TARGET'] == 1) cond_0 = (app_train['TARGET'] == 0) print(app_train['CODE_GENDER'].value_counts()/app_train.shape[0]) print('Delinquent cases: ', app_train[cond_1]['CODE_GENDER'].value_counts()/app_train[cond_1].shape[0]) print('Non-delinquent cases: ', app_train[cond_0]['CODE_GENDER'].value_counts()/app_train[cond_0].shape[0])<load_from_csv>
class myCallback(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('val_accuracy')> 0.9955): print("Stop training!") self.model.stop_training = True optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"]) reduce_lr = ReduceLROnPlateau( monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001 ) epoch_end = myCallback()
Digit Recognizer
18,752,947
app_train = pd.read_csv('../input/home-credit-default-risk/application_train.csv') app_test = pd.read_csv('../input/home-credit-default-risk/application_test.csv')<concatenate>
history = model.fit(datagen.flow(x_train, y_train, batch_size=256), epochs=200, validation_data=(x_val, y_val), verbose=1, steps_per_epoch=x_train.shape[0]//256, callbacks=[reduce_lr, epoch_end])
Digit Recognizer
18,752,947
<feature_engineering><EOS>
results = model.predict(x_test) results = np.argmax(results, axis=1) submission = pd.read_csv(os.path.join(main_path, "sample_submission.csv")) image_id = range(1, x_test.shape[0]+1) submission = pd.DataFrame({'ImageId': image_id, 'Label': results}) submission.to_csv('cnn2_submission.csv', index=False)
Digit Recognizer
16,960,601
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
%matplotlib inline
Digit Recognizer
16,960,601
prev_app = pd.read_csv('../input/home-credit-default-risk/previous_application.csv') print(prev_app.shape, apps.shape)<merge>
train = import_data('../input/digit-recognizer/train.csv') test = import_data('../input/digit-recognizer/test.csv') y_lab = train['label'] y = tf.keras.utils.to_categorical(y_lab) train.drop('label', axis=1, inplace=True)
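The import_data helper is not defined anywhere in this excerpt. A minimal sketch of the memory-reducing CSV loader commonly used in Kaggle kernels; the name matches the call above, but the downcasting body is an assumption, not the author's confirmed implementation:

import pandas as pd

def import_data(path):
    # Read the CSV, then downcast each numeric column to the smallest
    # dtype that holds its value range, cutting memory substantially
    # for 0-255 pixel data (assumed behavior, not the original helper).
    df = pd.read_csv(path)
    for col in df.columns:
        if df[col].dtype == 'int64':
            df[col] = pd.to_numeric(df[col], downcast='unsigned')
        elif df[col].dtype == 'float64':
            df[col] = pd.to_numeric(df[col], downcast='float')
    return df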
Digit Recognizer
16,960,601
prev_app_outer = prev_app.merge(apps['SK_ID_CURR'], on='SK_ID_CURR', how='outer', indicator=True )<count_values>
train_df = np.array(train).reshape(-1, 28, 28, 1) test_df = np.array(test).reshape(-1, 28, 28, 1) del train del test del y_lab
Digit Recognizer
16,960,601
prev_app_outer['_merge'].value_counts()<sort_values>
def change_size(image): img = array_to_img(image, scale=False) img = img.resize((75, 75)) img = img.convert(mode='RGB') arr = img_to_array(img) return arr.astype(np.float32)
Digit Recognizer
16,960,601
def missing_data(data): total = data.isnull().sum().sort_values(ascending = False) percent =(data.isnull().sum() /data.isnull().count() *100 ).sort_values(ascending = False) return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'] )<groupby>
train_array = [change_size(img) for img in train_df] train = np.array(train_array) del train_array test_array = [change_size(img) for img in test_df] test = np.array(test_array) del test_array
Digit Recognizer
16,960,601
prev_app.groupby('SK_ID_CURR' ).count()<groupby>
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False): def eraser(input_img): if input_img.ndim == 3: img_h, img_w, img_c = input_img.shape elif input_img.ndim == 2: img_h, img_w = input_img.shape p_1 = np.random.rand() if p_1 > p: return input_img while True: s = np.random.uniform(s_l, s_h)* img_h * img_w r = np.random.uniform(r_1, r_2) w = int(np.sqrt(s / r)) h = int(np.sqrt(s * r)) left = np.random.randint(0, img_w) top = np.random.randint(0, img_h) if left + w <= img_w and top + h <= img_h: break if pixel_level: if input_img.ndim == 3: c = np.random.uniform(v_l, v_h,(h, w, img_c)) if input_img.ndim == 2: c = np.random.uniform(v_l, v_h,(h, w)) else: c = np.random.uniform(v_l, v_h) input_img[top:top + h, left:left + w] = c return input_img return eraser
Digit Recognizer
16,960,601
prev_app.groupby('SK_ID_CURR')['SK_ID_CURR'].count()<merge>
image_gen = ImageDataGenerator(rescale=1./255, featurewise_center=False, preprocessing_function=get_random_eraser(v_l=0, v_h=1), samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zoom_range=0.1, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.3, validation_split=0.2) train_generator = image_gen.flow(train, y, batch_size=32, shuffle=True, subset='training', seed=42) valid_generator = image_gen.flow(train, y, batch_size=16, shuffle=True, subset='validation') del train_df del test_df del train
Digit Recognizer
16,960,601
app_prev_target = prev_app.merge(app_train[['SK_ID_CURR', 'TARGET']], on='SK_ID_CURR', how='left') app_prev_target.shape<define_variables>
model = Sequential() model.add(tf.keras.applications.resnet50.ResNet50(input_shape =(75, 75, 3), pooling = 'avg', include_top = False, weights = 'imagenet')) model.add(L.Flatten()) model.add(L.Dense(128, activation='relu')) model.add(L.Dense(10, activation='softmax')) model.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), loss='categorical_crossentropy', metrics=['accuracy']) learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
16,960,601
num_columns = [column for column in num_columns if column not in ['SK_ID_PREV', 'SK_ID_CURR', 'TARGET']] num_columns<count_values>
for layer in model.layers[0].layers: if layer.name == 'conv5_block1_0_conv': break layer.trainable=False
Digit Recognizer
16,960,601
app_prev_target.TARGET.value_counts()<groupby>
history = model.fit(train_generator, validation_data=valid_generator, epochs=20, steps_per_epoch=train_generator.n//train_generator.batch_size, validation_steps=valid_generator.n//valid_generator.batch_size, callbacks=[learning_rate_reduction] )
Digit Recognizer
16,960,601
print(app_prev_target.groupby('TARGET' ).agg({'AMT_ANNUITY': ['mean', 'median', 'count']})) print(app_prev_target.groupby('TARGET' ).agg({'AMT_APPLICATION': ['mean', 'median', 'count']})) print(app_prev_target.groupby('TARGET' ).agg({'AMT_CREDIT': ['mean', 'median', 'count']}))<groupby>
test = test/255
Digit Recognizer
16,960,601
<groupby><EOS>
res = model.predict(test[:]) output = pd.DataFrame({'ImageId':[ i+1 for i in range(len(res)) ], 'Label': [ xi.argmax() for xi in res]}) output.to_csv('submission_grid.csv', index=False )
Digit Recognizer
18,665,924
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<groupby>
warnings.filterwarnings("ignore") %matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep' )
Digit Recognizer
18,665,924
prev_group = prev_app.groupby('SK_ID_CURR') prev_group.head()<create_dataframe>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') sub = pd.read_csv('../input/digit-recognizer/sample_submission.csv') print("Data is ready!")
Digit Recognizer
18,665,924
prev_agg = pd.DataFrame() prev_agg['CNT'] = prev_group['SK_ID_CURR'].count() prev_agg.head()<feature_engineering>
print(f"Training data size is {train.shape} Testing data size is {test.shape}" )
Digit Recognizer
18,665,924
prev_agg['AVG_CREDIT'] = prev_group['AMT_CREDIT'].mean() prev_agg['MAX_CREDIT'] = prev_group['AMT_CREDIT'].max() prev_agg['MIN_CREDIT'] = prev_group['AMT_CREDIT'].min() prev_agg.head()<merge>
Y_train = train["label"] X_train = train.drop(labels = ["label"], axis = 1 )
Digit Recognizer
18,665,924
prev_group = prev_app.groupby('SK_ID_CURR') prev_agg1 = prev_group['AMT_CREDIT'].agg(['mean', 'max', 'min']) prev_agg2 = prev_group['AMT_ANNUITY'].agg(['mean', 'max', 'min']) prev_agg = prev_agg1.merge(prev_agg2, on='SK_ID_CURR', how='inner') prev_agg.head()<feature_engineering>
(x_train1, y_train1), (x_test1, y_test1) = mnist.load_data() train1 = np.concatenate([x_train1, x_test1], axis=0) y_train1 = np.concatenate([y_train1, y_test1], axis=0) Y_train1 = y_train1 X_train1 = train1.reshape(-1, 28*28)
Digit Recognizer
18,665,924
prev_app['PREV_CREDIT_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_CREDIT'] prev_app['PREV_GOODS_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_GOODS_PRICE'] prev_app['PREV_CREDIT_APPL_RATIO'] = prev_app['AMT_CREDIT']/prev_app['AMT_APPLICATION'] prev_app['PREV_ANNUITY_APPL_RATIO'] = prev_app['AMT_ANNUITY']/prev_app['AMT_APPLICATION'] prev_app['PREV_GOODS_APPL_RATIO'] = prev_app['AMT_GOODS_PRICE']/prev_app['AMT_APPLICATION']<feature_engineering>
X_train = X_train / 255.0 test = test / 255.0 X_train1 = X_train1 / 255.0
Digit Recognizer
18,665,924
prev_app['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True) prev_app['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True) prev_app['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True) prev_app['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True) prev_app['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True) prev_app['PREV_DAYS_LAST_DUE_DIFF'] = prev_app['DAYS_LAST_DUE_1ST_VERSION'] - prev_app['DAYS_LAST_DUE']<feature_engineering>
X_train = np.concatenate((X_train.values, X_train1)) Y_train = np.concatenate((Y_train, Y_train1))
Digit Recognizer
18,665,924
all_pay = prev_app['AMT_ANNUITY'] * prev_app['CNT_PAYMENT'] prev_app['PREV_INTERESTS_RATE'] = (all_pay/prev_app['AMT_CREDIT'] - 1)/prev_app['CNT_PAYMENT']<define_variables>
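Total repayment here is AMT_ANNUITY * CNT_PAYMENT, so dividing by AMT_CREDIT gives the gross markup over the loan, and spreading that markup over the payment count yields a flat per-installment rate. A quick numeric check of the formula (illustrative values, not from the data):

# A 100,000 credit repaid as 12 annuities of 9,000:
amt_credit, amt_annuity, cnt_payment = 100_000, 9_000, 12
all_pay = amt_annuity * cnt_payment              # 108,000 repaid in total
rate = (all_pay / amt_credit - 1) / cnt_payment  # 8% markup over 12 periods
print(round(rate, 4))                            # 0.0067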
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
18,665,924
agg_dict = { 'SK_ID_CURR':['count'], 'AMT_CREDIT':['mean', 'max', 'sum'], 'AMT_ANNUITY':['mean', 'max', 'sum'], 'AMT_APPLICATION':['mean', 'max', 'sum'], 'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'], 'AMT_GOODS_PRICE':['mean', 'max', 'sum'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'PREV_CREDIT_DIFF':['mean', 'max', 'sum'], 'PREV_CREDIT_APPL_RATIO':['mean', 'max'], 'PREV_GOODS_DIFF':['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO':['mean', 'max'], 'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'], 'PREV_INTERESTS_RATE':['mean', 'max'] }<groupby>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )
Digit Recognizer
18,665,924
prev_group = prev_app.groupby('SK_ID_CURR') prev_amt_agg = prev_group.agg(agg_dict) prev_amt_agg.columns = ['PREV_' + '_'.join(column).upper() for column in prev_amt_agg.columns.ravel()]<count_values>
model = Sequential() model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same', activation ='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
18,665,924
prev_app['NAME_CONTRACT_STATUS'].value_counts()<groupby>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
18,665,924
prev_refused_agg = prev_refused.groupby('SK_ID_CURR')['SK_ID_CURR'].count() prev_refused_agg.shape, prev_amt_agg.shape<create_dataframe>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
18,665,924
pd.DataFrame(prev_refused_agg )<rename_columns>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
18,665,924
prev_refused_agg.reset_index(name='PREV_REFUSED_COUNT')<merge>
epochs = 50 batch_size = 128
Digit Recognizer
18,665,924
prev_refused_agg = prev_refused_agg.reset_index(name='PREV_REFUSED_COUNT') prev_amt_agg = prev_amt_agg.reset_index() prev_amt_refused_agg = prev_amt_agg.merge(prev_refused_agg, on='SK_ID_CURR', how='left') prev_amt_refused_agg.head(10 )<count_values>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) train_gen = datagen.flow(X_train,Y_train, batch_size=batch_size )
Digit Recognizer
18,665,924
prev_amt_refused_agg['PREV_REFUSED_COUNT'].value_counts(dropna=False )<feature_engineering>
history = model.fit(train_gen, epochs = epochs,validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction], validation_steps = X_val.shape[0] // batch_size )
Digit Recognizer
18,665,924
prev_amt_refused_agg = prev_amt_refused_agg.fillna(0) prev_amt_refused_agg['PREV_REFUSE_RATIO'] = prev_amt_refused_agg['PREV_REFUSED_COUNT'] / prev_amt_refused_agg['PREV_SK_ID_CURR_COUNT'] prev_amt_refused_agg.head(10 )<groupby>
errors =(Y_pred_classes - Y_true != 0) Y_pred_classes_errors = Y_pred_classes[errors] Y_pred_errors = Y_pred[errors] Y_true_errors = Y_true[errors] X_val_errors = X_val[errors]
Digit Recognizer
18,665,924
prev_refused_appr_group = prev_app[prev_app['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])].groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS']) prev_refused_appr_agg = prev_refused_appr_group['SK_ID_CURR'].count().unstack() prev_refused_appr_agg.head(10 )<drop_column>
Y_pred_errors_prob = np.max(Y_pred_errors, axis=1) true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1)) delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors sorted_delta_errors = np.argsort(delta_pred_true_errors) most_important_errors = sorted_delta_errors[-6:] display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
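display_errors is called above but never defined in this excerpt. A minimal sketch under the assumption that it renders the six most confidently misclassified validation digits; the signature follows the call, the body is assumed:

import matplotlib.pyplot as plt

def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    # Plot each selected misclassified digit with predicted vs. true label.
    fig, axes = plt.subplots(2, 3, figsize=(10, 6))
    for ax, idx in zip(axes.ravel(), errors_index):
        ax.imshow(img_errors[idx].reshape(28, 28), cmap='gray')
        ax.set_title('Pred: %d  True: %d' % (pred_errors[idx], obs_errors[idx]))
        ax.axis('off')
    plt.show()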
Digit Recognizer
18,665,924
prev_refused_appr_agg = prev_refused_appr_agg.fillna(0) prev_refused_appr_agg.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT'] prev_refused_appr_agg = prev_refused_appr_agg.reset_index() prev_refused_appr_agg.head(10 )<merge>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
18,665,924
prev_agg = prev_amt_agg.merge(prev_refused_appr_agg, on='SK_ID_CURR', how='left') prev_agg['PREV_REFUSED_RATIO'] = prev_agg['PREV_REFUSED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT'] prev_agg['PREV_APPROVED_RATIO'] = prev_agg['PREV_APPROVED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT'] prev_agg = prev_agg.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1) prev_agg.head(30 )<categorify>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_submission.csv",index=False )
Digit Recognizer
18,665,924
apps_all = get_apps_processed(apps)<merge>
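get_apps_processed is not defined in this excerpt; judging from the APPS_* ratio columns used elsewhere, it plausibly wraps feature engineering like the data_processing helper that appears later in this dump. A sketch with an assumed body:

def get_apps_processed(apps):
    # Assumed wrapper: derive APPS_* ratio features on a copy so the
    # raw application dataframe stays untouched.
    out = apps.copy()
    out['APPS_ANNUITY_CREDIT_RATIO'] = out['AMT_ANNUITY'] / out['AMT_CREDIT']
    out['APPS_GOODS_CREDIT_RATIO'] = out['AMT_GOODS_PRICE'] / out['AMT_CREDIT']
    out['APPS_CREDIT_INCOME_RATIO'] = out['AMT_CREDIT'] / out['AMT_INCOME_TOTAL']
    out['APPS_EMPLOYED_BIRTH_RATIO'] = out['DAYS_EMPLOYED'] / out['DAYS_BIRTH']
    return out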
(x_train1, y_train1), (x_test1, y_test1) = mnist.load_data() Y_train1 = y_train1 X_train1 = x_train1.reshape(-1, 28*28)
Digit Recognizer
18,665,924
print(apps_all.shape, prev_agg.shape) apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left') print(apps_all.shape )<feature_engineering>
train_data = pd.read_csv('../input/digit-recognizer/train.csv') test_data = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
18,665,924
object_columns = apps_all.dtypes[apps_all.dtypes == 'object'].index.tolist() for column in object_columns: apps_all[column] = pd.factorize(apps_all[column])[0]<drop_column>
train_images = train_data.copy() train_images = train_images.values X_train = train_images[:,1:] y_train = train_images[:,0] X_test = test_data.values
Digit Recognizer
18,665,924
apps_all_train = apps_all[~apps_all['TARGET'].isnull() ] apps_all_test = apps_all[apps_all['TARGET'].isnull() ] apps_all_test = apps_all_test.drop('TARGET', axis=1 )<split>
predictions = np.zeros(X_train.shape[0])
Digit Recognizer
18,665,924
ftr_app = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1) target_app = apps_all_train['TARGET'] train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020) train_x.shape, valid_x.shape<train_model>
x1=0 x2=0 print("Classifying Kaggle's 'test.csv' using KNN where K=1 and MNIST 70k images...") for i in range(0,28000): for j in range(0,70000): if np.absolute(X_test[i,:]-mnist_image[j,:]).sum()==0: predictions[i]=mnist_label[j] if i%1000==0: print(" %d images classified perfectly"%(i),end="") if j<60000: x1+=1 else: x2+=1 break if x1+x2==28000: print(" 28000 images classified perfectly.") print("All 28000 images are contained in MNIST.npz Dataset.") print("%d images are in MNIST.npz train and %d images are in MNIST.npz test"%(x1,x2))
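mnist_image and mnist_label (the full 70k-image MNIST arrays) are built outside this excerpt. A sketch of a construction consistent with the loop above, where indices below 60000 must be train images; the int64 cast avoids uint8 wrap-around in the pixel-difference check (the construction itself is an assumption):

import numpy as np
from tensorflow.keras.datasets import mnist

(x_tr, y_tr), (x_te, y_te) = mnist.load_data()
# Train images (60k) first, then test (10k), matching the j < 60000 split.
mnist_image = np.concatenate([x_tr, x_te]).reshape(-1, 784).astype(np.int64)
mnist_label = np.concatenate([y_tr, y_te])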
Digit Recognizer
18,665,924
clf = LGBMClassifier( n_jobs=-1, n_estimators=1000, learning_rate=0.02, num_leaves=32, subsample=0.8, max_depth=12, silent=-1, verbose=-1 ) clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100, early_stopping_rounds= 50 )<save_to_csv>
final_pred = predictions[0:28000]
Digit Recognizer
18,665,924
preds = clf.predict_proba(apps_all_test.drop('SK_ID_CURR', axis=1))[:, 1] apps_all_test['TARGET'] = preds apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('prev_baseline_03.csv', index=False)<load_from_csv>
my_submission = pd.DataFrame({'ImageId': np.arange(28000), 'Label': final_pred.squeeze().astype(int)}) my_submission.head()
Digit Recognizer
18,665,924
application_train = pd.read_csv('/kaggle/input/home-credit-default-risk/application_train.csv') application_test = pd.read_csv('/kaggle/input/home-credit-default-risk/application_test.csv') <train_model>
my_submission["ImageId"]=my_submission["ImageId"]+1
Digit Recognizer
18,665,924
print("Dimension of application_train :", application_train.shape) print("결측치가 있는 컬럼 수 :",(application_train.isnull().sum() !=0 ).sum()) application_train.head()<train_model>
my_submission.to_csv('best_submission.csv', index=False )
Digit Recognizer
21,304,203
print("Dimension :", application_train.dropna(axis=0 ).shape) print("결측치가 있는 컬럼 수 :",(application_train.dropna(axis=0 ).isnull().sum() !=0 ).sum()) application_train.dropna(axis=0 )<train_model>
data_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') data_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
21,304,203
column_list = [] for name in column_series.keys() : if(column_series[name]>100000): column_list.append(name) print(column_list, len(column_list))<train_model>
print('Number of non-valid elements in training set:', data_train[data_train.isna() == True].count().sum() , ' Number of non valid elements in test set:', data_test[data_test.isna() == True].count().sum() )
Digit Recognizer
21,304,203
def show_hist_by_target(df, columns): cond_1 = (df['TARGET'] == 1) cond_0 = (df['TARGET'] == 0) for column in columns: fig, ax = plt.subplots(figsize=(12, 4), nrows=1, ncols=2, squeeze=False) if type(df[column][0]) is str: df_temp = df[["TARGET",column]].value_counts().astype(float) idx_temp = df_temp.reset_index(name='RATIO')[column].unique() for i in range(0,2): sum_temp = df_temp[i].sum() for j in idx_temp: df_temp[(i,j)] = df_temp[(i,j)] / sum_temp df_temp = df_temp.reset_index(name='RATIO') sns.barplot(x="TARGET", y="RATIO", hue=column, data=df_temp, ax=ax[0][0]) sns.lineplot(x=df[cond_1][column].value_counts().keys().tolist(), y=df[cond_1][column].value_counts(), label='target=1', color='red', ax=ax[0][1]) sns.lineplot(x=df[cond_0][column].value_counts().keys().tolist(), y=df[cond_0][column].value_counts(), label='target=0', color='blue', ax=ax[0][1]) else: sns.violinplot(x='TARGET', y=column, data=df, ax=ax[0][0]) sns.histplot(df[cond_1][column], label='target=1', color='red', ax=ax[0][1], kde=True) sns.histplot(df[cond_0][column], label='target=0', color='blue', ax=ax[0][1], kde=True) plt.show() plt.close()<sort_values>
data_train_pd = data_train.copy()
Digit Recognizer
21,304,203
abs(cor["TARGET"] ).sort_values()<count_values>
true_labels = data_train.label data_train = data_train.drop('label', axis = 1 )
Digit Recognizer
21,304,203
application_train.dtypes.value_counts()<categorify>
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300) data_train_embedded = tsne.fit_transform(sample.drop('label', axis = 1))
Digit Recognizer
21,304,203
application_train["FONDKAPREMONT_MODE"]<categorify>
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train_pd.drop('label', axis = 1), data_train_pd.label, test_size = 0.25, random_state=0) knn = KNeighborsClassifier(n_neighbors=10, n_jobs=-1) knn.fit(X_train, y_train)
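The fitted KNN is never scored in this excerpt; a one-line holdout check, assuming the 25% split above:

print("KNN holdout accuracy: {:.3f}".format(knn.score(X_holdout, y_holdout)))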
Digit Recognizer
21,304,203
le = LabelEncoder() le_count = 0 for col in application_train: if application_train[col].dtype == 'object': if len(list(application_train[col].unique())) >= 2: le.fit(application_train[col]) application_train[col] = le.transform(application_train[col]) application_test[col] = le.transform(application_test[col]) le_count += 1 print('Label Encoding: %d columns label-encoded.' % le_count)<define_variables>
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train_pd.drop('label', axis = 1), data_train_pd.label, test_size = 0.25, random_state=0) bnbclf = BernoulliNB() bnbclf.fit(X_train, y_train )
Digit Recognizer
21,304,203
application_train["FONDKAPREMONT_MODE"]<count_unique_values>
print("Accuracy score: {:.2f}".format(bnbclf.score(X_holdout, y_holdout))) print("Cross-entropy loss: {:.2f}".format(log_loss(np.array(y_holdout), bnbclf.predict_proba(X_holdout))))
Digit Recognizer
21,304,203
application_train.select_dtypes('object' ).apply(pd.Series.nunique, axis = 0 )<define_variables>
bnb_params = {'alpha': np.arange(0.01, 0.1, 0.05), 'binarize' : np.arange(0, 0.5, 0.2), 'fit_prior': [True, False] } bnbcv = GridSearchCV(bnbclf, param_grid = bnb_params, cv = 3 )
Digit Recognizer
21,304,203
rel_list = [] for rel_column in rel.index: if rel[rel_column] < 0.03: rel_list.append(rel_column) print(rel_column)<drop_column>
bnbcv.fit(X_train, y_train) bnb_best = bnbcv.best_estimator_
Digit Recognizer
21,304,203
rel_list.remove('SK_ID_CURR' )<drop_column>
bnbcv.best_params_
Digit Recognizer
21,304,203
column_list.remove("EXT_SOURCE_1") app_train = application_train<train_model>
print("Accuracy score: {:.2f}".format(bnb_best.score(X_holdout, y_holdout))) print("Cross-entropy loss: {:.2f}".format(log_loss(np.array(y_holdout), bnb_best.predict_proba(X_holdout))))
Digit Recognizer
21,304,203
print("Dimension of application_test :", application_test.shape) print("결측치가 있는 컬럼 수 :",(application_test.isnull().sum() !=0 ).sum()) application_test.head()<drop_column>
model = Sequential() model.add(Convolution2D(32,(3, 3), activation='relu', input_shape=(28,28,1))) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer="zeros", gamma_initializer="ones",)) model.add(Convolution2D(32,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer="zeros", gamma_initializer="ones",)) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax'))
Digit Recognizer
21,304,203
app_test = application_test<feature_engineering>
data_train = data_train / 255 data_test = data_test / 255
Digit Recognizer
21,304,203
def data_processing(out, data): out['APPS_EXT_SOURCE_MEAN'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) out['APPS_EXT_SOURCE_STD'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1) out['APPS_EXT_SOURCE_STD'] = out['APPS_EXT_SOURCE_STD'].fillna(out['APPS_EXT_SOURCE_STD'].mean()) out['APPS_ANNUITY_CREDIT_RATIO'] = data['AMT_ANNUITY']/data['AMT_CREDIT'] out['APPS_GOODS_CREDIT_RATIO'] = data['AMT_GOODS_PRICE']/data['AMT_CREDIT'] out['APPS_ANNUITY_INCOME_RATIO'] = data['AMT_ANNUITY']/data['AMT_INCOME_TOTAL'] out['APPS_GOODS_INCOME_RATIO'] = data['AMT_GOODS_PRICE']/data['AMT_INCOME_TOTAL'] out['APPS_CREDIT_INCOME_RATIO'] = data['AMT_CREDIT']/data['AMT_INCOME_TOTAL'] out['APPS_CNT_FAM_INCOME_RATIO'] = data['AMT_INCOME_TOTAL']/data['CNT_FAM_MEMBERS'] out['APPS_EMPLOYED_BIRTH_RATIO'] = data['DAYS_EMPLOYED']/data['DAYS_BIRTH'] out['APPS_INCOME_EMPLOYED_RATIO'] = data['AMT_INCOME_TOTAL']/data['DAYS_EMPLOYED'] out['APPS_INCOME_BIRTH_RATIO'] = data['AMT_INCOME_TOTAL']/data['DAYS_BIRTH'] out['APPS_CAR_BIRTH_RATIO'] = data['OWN_CAR_AGE'] / data['DAYS_BIRTH'] out['APPS_CAR_EMPLOYED_RATIO'] = data['OWN_CAR_AGE'] / data['DAYS_EMPLOYED'] return out<train_model>
y = np.array(pd.get_dummies(true_labels))
Digit Recognizer
21,304,203
app_train = data_processing(app_train, application_train) app_test = data_processing(app_test, application_test) app_train.shape, app_test.shape<load_from_csv>
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train, y, test_size = 0.25, random_state=17 )
Digit Recognizer
21,304,203
prev_app = pd.read_csv('../input/home-credit-default-risk/previous_application.csv') print(prev_app.shape, app_train.shape)<feature_engineering>
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=0.000001, verbose=1 )
Digit Recognizer
21,304,203
prev_app['PREV_CREDIT_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_CREDIT'] prev_app['PREV_GOODS_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_GOODS_PRICE'] prev_app['PREV_CREDIT_APPL_RATIO'] = prev_app['AMT_CREDIT']/prev_app['AMT_APPLICATION'] prev_app['PREV_ANNUITY_APPL_RATIO'] = prev_app['AMT_ANNUITY']/prev_app['AMT_APPLICATION'] prev_app['PREV_GOODS_APPL_RATIO'] = prev_app['AMT_GOODS_PRICE']/prev_app['AMT_APPLICATION']<feature_engineering>
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy']) result = model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1, validation_data=(X_holdout, y_holdout), callbacks = [reduce_lr] )
Digit Recognizer
21,304,203
prev_app['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True) prev_app['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True) prev_app['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True) prev_app['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True) prev_app['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True) prev_app['PREV_DAYS_LAST_DUE_DIFF'] = prev_app['DAYS_LAST_DUE_1ST_VERSION'] - prev_app['DAYS_LAST_DUE']<define_variables>
layer_names = [layer.name for layer in model.layers] layer_outputs = [layer.output for layer in model.layers] layer_outputs = [layer_outputs[0], layer_outputs[2]] feature_map_model = Model(model.input, layer_outputs) im = X_train[99:100,:] feature_maps = feature_map_model.predict(im )
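feature_maps is computed above but never visualized in this excerpt. A minimal plotting sketch, assuming feature_maps[0] is the first Conv2D activation with shape (1, 26, 26, 32):

import matplotlib.pyplot as plt

fmap = feature_maps[0]  # assumed: (1, 26, 26, 32)
fig, axes = plt.subplots(4, 8, figsize=(12, 6))
for i, ax in enumerate(axes.ravel()):
    ax.imshow(fmap[0, :, :, i], cmap='viridis')  # one channel per panel
    ax.axis('off')
plt.show()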
Digit Recognizer
21,304,203
agg_dict = { 'SK_ID_CURR':['count'], 'AMT_CREDIT':['mean', 'max', 'sum'], 'AMT_ANNUITY':['mean', 'max', 'sum'], 'AMT_APPLICATION':['mean', 'max', 'sum'], 'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'], 'AMT_GOODS_PRICE':['mean', 'max', 'sum'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'PREV_CREDIT_DIFF':['mean', 'max', 'sum'], 'PREV_CREDIT_APPL_RATIO':['mean', 'max'], 'PREV_GOODS_DIFF':['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO':['mean', 'max'], 'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'], 'PREV_INTERESTS_RATE':['mean', 'max'] } prev_group = prev_app.groupby('SK_ID_CURR') prev_amt_agg = prev_group.agg(agg_dict) prev_amt_agg.columns = ['PREV_' + '_'.join(column).upper() for column in prev_amt_agg.columns.ravel()] prev_amt_agg.head()<merge>
augumentator = tf.keras.preprocessing.image.ImageDataGenerator( rotation_range=15, width_shift_range=0.15, shear_range=0.1, zoom_range=0.1, validation_split=0.0, horizontal_flip=False, vertical_flip=False) augumentator.fit(X_train )
Digit Recognizer
21,304,203
prev_app_merge = app_train.merge(prev_amt_agg, on='SK_ID_CURR', how='left', indicator=True) prev_app_merge = prev_app_merge.drop(columns=['_merge']) prev_app_merge.shape<count_values>
history = model.fit(augumentator.flow(X_train, y_train, batch_size = 32), epochs = 10, validation_data =(X_holdout, y_holdout), verbose = 1, callbacks = [reduce_lr] )
Digit Recognizer
21,304,203
prev_app['NAME_CONTRACT_STATUS'].value_counts()<define_variables>
mnist = tf.keras.datasets.mnist (X_train_mnist, y_train_mnist),(X_val_mnist, y_val_mnist)= mnist.load_data()
Digit Recognizer
21,304,203
cond_refused =(prev_app['NAME_CONTRACT_STATUS'] == 'Refused') cond_approved =(prev_app['NAME_CONTRACT_STATUS'] == 'Approved') prev_refused = prev_app[cond_refused] prev_approved = prev_app[cond_approved] prev_refused.shape, prev_approved.shape, prev_app.shape<groupby>
y_train_mnist = np.array(pd.get_dummies(pd.Series(y_train_mnist))) y_holdout_mnist = np.array(pd.get_dummies(pd.Series(y_val_mnist)) )
Digit Recognizer
21,304,203
prev_refused = prev_refused.groupby('SK_ID_CURR') prev_approved = prev_approved.groupby('SK_ID_CURR' )<count_values>
X_train_mnist = X_train_mnist.reshape(-1, 28, 28, 1) X_holdout_mnist = X_val_mnist.reshape(-1, 28, 28, 1) X_train_mnist = X_train_mnist / 255 X_holdout_mnist = X_holdout_mnist /255
Digit Recognizer
21,304,203
prev_refused = prev_refused['NAME_CONTRACT_TYPE'].count() prev_refused.name = "PRE_CONTRACT_REFUSED" prev_approved = prev_approved['NAME_CONTRACT_TYPE'].count() prev_approved.name = "PRE_CONTRACT_APPROVED"<merge>
X_train_ext = np.concatenate(( X_train, X_train_mnist), axis = 0) X_holdout_ext = np.concatenate(( X_holdout, X_holdout_mnist), axis = 0) y_train_ext = np.concatenate(( y_train, y_train_mnist), axis = 0) y_holdout_ext = np.concatenate(( y_holdout, y_holdout_mnist), axis = 0 )
Digit Recognizer
21,304,203
prev_app_merge = prev_app_merge.merge(prev_approved, on='SK_ID_CURR', how='left', indicator=False) prev_app_merge = prev_app_merge.merge(prev_refused, on='SK_ID_CURR', how='left', indicator=False) prev_app_merge['PRE_CONTRACT_APPROVED_RATE'] = prev_app_merge['PRE_CONTRACT_APPROVED'] /(prev_app_merge['PRE_CONTRACT_APPROVED'] + prev_app_merge['PRE_CONTRACT_REFUSED']) prev_app_merge['PRE_CONTRACT_REFUSED_RATE'] = prev_app_merge['PRE_CONTRACT_REFUSED'] /(prev_app_merge['PRE_CONTRACT_APPROVED'] + prev_app_merge['PRE_CONTRACT_REFUSED']) prev_app_merge.head()<categorify>
model.fit(X_train_ext, y_train_ext, batch_size=32, epochs=20, verbose=1, validation_data=(X_holdout, y_holdout), callbacks = [reduce_lr] )
Digit Recognizer
21,304,203
prev_app_merge = prev_app_merge.replace(float('NaN'),0) prev_app_merge.head()<load_from_csv>
predictions = model.predict(data_test).argmax(axis=1) predictions submission = pd.DataFrame({'ImageId': np.arange(1, len(predictions)+1), 'Label': predictions}) submission.to_csv('submission.csv', index=False)
Digit Recognizer
21,316,401
bureau = pd.read_csv('../input/home-credit-default-risk/bureau.csv') print("Size of bureau data", bureau.shape)<merge>
from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten from tensorflow.keras import utils from tensorflow.keras.preprocessing import image from tensorflow.python.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint import tensorflow as tf from sklearn.model_selection import train_test_split import numpy as np import pandas as pd import matplotlib.pyplot as plt
Digit Recognizer
21,316,401
PAST_LOANS_PER_CUS = bureau[['SK_ID_CURR', 'DAYS_CREDIT']].groupby(by = ['SK_ID_CURR'])['DAYS_CREDIT'].count().reset_index().rename(index=str, columns={'DAYS_CREDIT': 'BUREAU_LOAN_COUNT'}) app_train_bureau = prev_app_merge.merge(PAST_LOANS_PER_CUS, on = ['SK_ID_CURR'], how = 'left') print(app_train_bureau.shape) app_train_bureau.head()<merge>
data_train = np.loadtxt('/kaggle/input/digit-recognizer/train.csv', skiprows = 1, delimiter= ',') data_train[0:5]
Digit Recognizer