orionhunts-ai committed
Commit • 476d115
1 Parent(s): 2ba274e
CR Model selection and implemented WandB for SKlearn
Browse files
- .env.example +0 -0
- .gitignore +6 -0
- README.md +0 -0
- __init__.py +0 -0
- constants.py +0 -0
- data/__init__.py +0 -0
- data/processed/PII_Customer_Personality_Analysis/artifacts/corr_matrix_PII_Customer_Personality_Analysis_correlation_matrix.csv +26 -0
- data/processed/PII_Customer_Personality_Analysis/artifacts/correlation_matrix.png +0 -0
- data/processed/PII_Customer_Personality_Analysis/data/2024_08_25_PII_Customer_Personality_Analysis_v0.1.csv +0 -0
- data/processed/PII_Customer_Personality_Analysis/data/corr_matrix_PII_Customer_Personality_Analysis_correlation_matrix.csv +26 -0
- data/processed/__init__.py +0 -0
- data/processed/clustering_ml.py +102 -0
- data/processed/customer_profile_marketing.csv +0 -0
- data/processed/eda.py.bak +104 -0
- data/processed/eda_code_final_fixed.py +128 -0
- data/processed/supervized_ml.py +195 -0
- data/raw/customer_profile_eda_normalization.ipynb +332 -0
- data/raw/data_notes.md +11 -0
- data/raw/hf/.gitattributes +58 -0
- data/raw/hf/README.md +55 -0
- data/raw/marketing_campaign.csv +0 -0
- data/raw/medical_data_raw.ipynb +207 -0
- data/raw/raw_data.ipynb +662 -0
- gretel_ai/gretel_exp.md +4 -0
- medical_records_did +1 -0
- pyproject.toml +30 -0
- src/lstm.ipynb +0 -0
- transformer_models/unsloth_model_colab.ipynb +466 -0
.env.example
ADDED
File without changes
.gitignore
ADDED
@@ -0,0 +1,6 @@
__pycache__/
poetry.lock
*.env
**/.env
.DS_Store
**/wandb/
README.md
ADDED
File without changes
__init__.py
ADDED
File without changes
constants.py
ADDED
File without changes
data/__init__.py
ADDED
File without changes
data/processed/PII_Customer_Personality_Analysis/artifacts/corr_matrix_PII_Customer_Personality_Analysis_correlation_matrix.csv
ADDED
@@ -0,0 +1,26 @@
,Education,Marital_Status,Income,Kidhome,Teenhome,Recency,MntWines,MntFruits,MntMeatProducts,MntFishProducts,MntSweetProducts,MntGoldProds,NumDealsPurchases,NumWebPurchases,NumCatalogPurchases,NumStorePurchases,NumWebVisitsMonth,AcceptedCmp3,AcceptedCmp4,AcceptedCmp5,AcceptedCmp1,AcceptedCmp2,Complain,target,Age
Education,1.0,0.00601057971479844,0.12069235890931647,-0.045390101722656215,0.12012771551954766,-0.011418202661844836,0.19788629514026926,-0.08246221470202815,0.03996093529190393,-0.11474748247768826,-0.10727879120827798,-0.09708394083472364,0.026207871865264364,0.08242517900615554,0.06904876324567108,0.06779203319202283,-0.040821622200267235,0.005822767693396697,0.0588849087724573,0.03266911640198275,-0.009741208717866857,0.02147726629373715,-0.05086296084713461,0.09080601779180715,0.1710648461886182
Marital_Status,0.00601057971479844,1.0,0.021352847397029834,-0.021718068362697428,-0.0032431226287101722,0.011137721721606375,0.009228819772649922,0.0005591923135064867,0.02780994113296931,0.03420772518449643,0.01565110733819486,0.0008966465168111868,-0.019293729796295137,0.00011772760451422034,0.014930606044176415,0.002603366103162813,-0.026657557466784578,-0.02623359458981945,0.014274398390713436,0.010256030152237389,-0.015569020204498948,0.018908848090815854,-0.005393690720407015,-0.012641150709966707,0.058228774244458686
Income,0.12069235890931647,0.021352847397029834,1.0,-0.42866900796918467,0.019133378179405063,-0.003969755538429371,0.5786497501367387,0.4308416809908738,0.5846333567663218,0.43887135945164113,0.44074379151936854,0.3259164464972651,-0.08310089573037695,0.38787781129179055,0.5891624419343273,0.5293621402734204,-0.5530880116530972,-0.016174440058630012,0.18440036817458086,0.3359432659885299,0.27681986364264505,0.08754477410485566,-0.02722451231447758,0.13304666375157864,0.16179142819632944
Kidhome,-0.045390101722656215,-0.021718068362697428,-0.42866900796918467,1.0,-0.03986909491269764,0.011492148877606401,-0.49733585804824904,-0.3733961018645468,-0.4392605293302491,-0.3888842203052554,-0.3780261275829725,-0.35502942464114273,0.2169130475866851,-0.3719765493465889,-0.5045006219645888,-0.5013488096627505,0.44747694055447784,0.016066022250294954,-0.16202597120826812,-0.20530460075712562,-0.17416308346026005,-0.08186792980170485,0.040977948951332006,-0.0779087218804935,-0.23361461678438017
Teenhome,0.12012771551954766,-0.0032431226287101722,0.019133378179405063,-0.03986909491269764,1.0,0.013837883216177588,0.003746662683131421,-0.17655763847960287,-0.26112238523373393,-0.2052418665057769,-0.1630557773562015,-0.01988723381366726,0.38624630410345656,0.16207718515997702,-0.11269219774843708,0.04973702442961182,0.13124002195157045,-0.04252154241540535,0.038375727599795045,-0.19079132962891837,-0.14485535030427124,-0.015520861256563333,0.003306980000913191,-0.15390119913873612,0.35079057250867324
Recency,-0.011418202661844836,0.011137721721606375,-0.003969755538429371,0.011492148877606401,0.013837883216177588,1.0,0.015721019423318053,-0.005843749911872586,0.022517635114411027,0.0005509232351481579,0.02510977031031601,0.01766263773496973,0.0021154508116066205,-0.005640853762668498,0.02408140757530815,-0.0004338265887671439,-0.01856364340172459,-0.03225726632386708,0.017566258881346348,-0.0004819996751676512,-0.021061220855675345,-0.0014003820869686575,0.01363667026546777,-0.19976636929150263,0.016294899725588837
MntWines,0.19788629514026926,0.009228819772649922,0.5786497501367387,-0.49733585804824904,0.003746662683131421,0.015721019423318053,1.0,0.3870238608948813,0.5688600028034905,0.3977210502397404,0.39032580211914597,0.3927309933121961,0.008885928846518159,0.5537859390502029,0.6347527405610132,0.6400119079346188,-0.3219779006408591,0.06146322133332154,0.37314333590551424,0.473550447360861,0.3514171077786048,0.20618492919071135,-0.03947021117770349,0.24629895700789062,0.15945109606670785
MntFruits,-0.08246221470202815,0.0005591923135064867,0.4308416809908738,-0.3733961018645468,-0.17655763847960287,-0.005843749911872586,0.3870238608948813,1.0,0.5478221664444243,0.5934310502810902,0.5716060634768749,0.39648692442903605,-0.13451209943613016,0.3020388491308559,0.4862630707589945,0.45849103147873843,-0.4187289323674573,0.014423959637837509,0.006395603242838285,0.21287107479087422,0.1918157630946309,-0.009980152400035486,-0.005324098581870991,0.12244267882212094,0.017746520210085638
MntMeatProducts,0.03996093529190393,0.02780994113296931,0.5846333567663218,-0.4392605293302491,-0.26112238523373393,0.022517635114411027,0.5688600028034905,0.5478221664444243,1.0,0.5735740153436658,0.5351361087117303,0.35944628071880613,-0.12130771413541634,0.30709036563638303,0.7341265978631459,0.4860055452848598,-0.5394844166382349,0.018437950940138954,0.09161819640940018,0.3768671184726933,0.31307611216860326,0.043521399455980485,-0.0237819441070392,0.23774641828315587,0.03369674544450875
MntFishProducts,-0.11474748247768826,0.03420772518449643,0.43887135945164113,-0.3888842203052554,-0.2052418665057769,0.0005509232351481579,0.3977210502397404,0.5934310502810902,0.5735740153436658,1.0,0.5838669550256257,0.4271420401285096,-0.14324108564174431,0.29968751037596947,0.5327567837134197,0.45774504320424736,-0.44642329175818335,-0.00021899209055690604,0.016105384537408233,0.19627745116954454,0.2616081098154486,0.0023448969973614508,-0.02122023035141569,0.10814510985482437,0.04042508416794455
MntSweetProducts,-0.10727879120827798,0.01565110733819486,0.44074379151936854,-0.3780261275829725,-0.1630557773562015,0.02510977031031601,0.39032580211914597,0.5716060634768749,0.5351361087117303,0.5838669550256257,1.0,0.35744974733671053,-0.12143192773267662,0.3339372174806106,0.49513581787229005,0.4552251635983501,-0.4223708035871346,0.0017804341899794044,0.029313011665345452,0.2592298737532875,0.24510196257446312,0.010188061683200663,-0.0226412001817123,0.1161703734475162,0.02020441495744574
MntGoldProds,-0.09708394083472364,0.0008966465168111868,0.3259164464972651,-0.35502942464114273,-0.01988723381366726,0.01766263773496973,0.3927309933121961,0.39648692442903605,0.35944628071880613,0.4271420401285096,0.35744974733671053,1.0,0.05190482939144336,0.40706566619258716,0.44242825214836723,0.3891801722259885,-0.2476905571725933,0.1249578642120278,0.024015092911599267,0.1813973755277105,0.17013156126608653,0.05073361018385326,-0.031133459344139035,0.1403316444499485,0.06420769327026418
NumDealsPurchases,0.026207871865264364,-0.019293729796295137,-0.08310089573037695,0.2169130475866851,0.38624630410345656,0.0021154508116066205,0.008885928846518159,-0.13451209943613016,-0.12130771413541634,-0.14324108564174431,-0.12143192773267662,0.05190482939144336,1.0,0.24144031825434095,-0.012118428034188273,0.06610659381931695,0.3460483799648631,-0.023135079994654903,0.016076520490276074,-0.1842529426068536,-0.12737389187862327,-0.03798115053624632,0.0004972466506366478,0.003451073256152576,0.05866805087264471
NumWebPurchases,0.08242517900615554,0.00011772760451422034,0.38787781129179055,-0.3719765493465889,0.16207718515997702,-0.005640853762668498,0.5537859390502029,0.3020388491308559,0.30709036563638303,0.29968751037596947,0.3339372174806106,0.40706566619258716,0.24144031825434095,1.0,0.3868676401456998,0.5162401826934668,-0.05122626307505021,0.04295782900243499,0.1629322581651734,0.14118889093609427,0.1592916660892605,0.034828595290096596,-0.016641779042421617,0.1514312334625439,0.1530513747670212
NumCatalogPurchases,0.06904876324567108,0.014930606044176415,0.5891624419343273,-0.5045006219645888,-0.11269219774843708,0.02408140757530815,0.6347527405610132,0.4862630707589945,0.7341265978631459,0.5327567837134197,0.49513581787229005,0.44242825214836723,-0.012118428034188273,0.3868676401456998,1.0,0.517840451115637,-0.5220037739848213,0.10434509873303167,0.14018199197446132,0.3224705753164749,0.3090257184513369,0.09991528121453742,-0.0208391906218129,0.2199136123036807,0.12176397201297959
NumStorePurchases,0.06779203319202283,0.002603366103162813,0.5293621402734204,-0.5013488096627505,0.04973702442961182,-0.0004338265887671439,0.6400119079346188,0.45849103147873843,0.4860055452848598,0.45774504320424736,0.4552251635983501,0.3891801722259885,0.06610659381931695,0.5162401826934668,0.517840451115637,1.0,-0.4323982572659749,-0.06891258923172716,0.17802019025920685,0.212953710216086,0.1787428895679233,0.08527077669680684,-0.016940707007434427,0.03624112917284234,0.12789072181374891
NumWebVisitsMonth,-0.040821622200267235,-0.026657557466784578,-0.5530880116530972,0.44747694055447784,0.13124002195157045,-0.01856364340172459,-0.3219779006408591,-0.4187289323674573,-0.5394844166382349,-0.44642329175818335,-0.4223708035871346,-0.2476905571725933,0.3460483799648631,-0.05122626307505021,-0.5220037739848213,-0.4323982572659749,1.0,0.06130723471258901,-0.028665889635463914,-0.2778831014680211,-0.19477318052693973,-0.0073616649308966215,0.01978500588725408,-0.002208954040941986,-0.12390393683196442
AcceptedCmp3,0.005822767693396697,-0.02623359458981945,-0.016174440058630012,0.016066022250294954,-0.04252154241540535,-0.03225726632386708,0.06146322133332154,0.014423959637837509,0.018437950940138954,-0.00021899209055690604,0.0017804341899794044,0.1249578642120278,-0.023135079994654903,0.04295782900243499,0.10434509873303167,-0.06891258923172716,0.06130723471258901,1.0,-0.07965858237443767,0.08024761492969107,0.09568286876259005,0.07170217241720112,0.008124113453170467,0.25400486323255694,-0.06178380024839474
AcceptedCmp4,0.0588849087724573,0.014274398390713436,0.18440036817458086,-0.16202597120826812,0.038375727599795045,0.017566258881346348,0.37314333590551424,0.006395603242838285,0.09161819640940018,0.016105384537408233,0.029313011665345452,0.024015092911599267,0.016076520490276074,0.1629322581651734,0.14018199197446132,0.17802019025920685,-0.028665889635463914,-0.07965858237443767,1.0,0.3113144997868341,0.24278177008858737,0.295049565226579,-0.027651941592759625,0.18020529304447308,0.06610852367279324
AcceptedCmp5,0.03266911640198275,0.010256030152237389,0.3359432659885299,-0.20530460075712562,-0.19079132962891837,-0.0004819996751676512,0.473550447360861,0.21287107479087422,0.3768671184726933,0.19627745116954454,0.2592298737532875,0.1813973755277105,-0.1842529426068536,0.14118889093609427,0.3224705753164749,0.212953710216086,-0.2778831014680211,0.08024761492969107,0.3113144997868341,1.0,0.407877927952029,0.22212082088409418,-0.009576350926681113,0.32337384792407603,-0.010574840069471311
AcceptedCmp1,-0.009741208717866857,-0.015569020204498948,0.27681986364264505,-0.17416308346026005,-0.14485535030427124,-0.021061220855675345,0.3514171077786048,0.1918157630946309,0.31307611216860326,0.2616081098154486,0.24510196257446312,0.17013156126608653,-0.12737389187862327,0.1592916660892605,0.3090257184513369,0.1787428895679233,-0.19477318052693973,0.09568286876259005,0.24278177008858737,0.407877927952029,1.0,0.17663707327744818,-0.025593647329387348,0.2973447406527777,0.009610506551183966
AcceptedCmp2,0.02147726629373715,0.018908848090815854,0.08754477410485566,-0.08186792980170485,-0.015520861256563333,-0.0014003820869686575,0.20618492919071135,-0.009980152400035486,0.043521399455980485,0.0023448969973614508,0.010188061683200663,0.05073361018385326,-0.03798115053624632,0.034828595290096596,0.09991528121453742,0.08527077669680684,-0.0073616649308966215,0.07170217241720112,0.295049565226579,0.22212082088409418,0.17663707327744818,1.0,-0.011458504341450727,0.16929370922966702,0.006716955921569739
Complain,-0.05086296084713461,-0.005393690720407015,-0.02722451231447758,0.040977948951332006,0.003306980000913191,0.01363667026546777,-0.03947021117770349,-0.005324098581870991,-0.0237819441070392,-0.02122023035141569,-0.0226412001817123,-0.031133459344139035,0.0004972466506366478,-0.016641779042421617,-0.0208391906218129,-0.016940707007434427,0.01978500588725408,0.008124113453170467,-0.027651941592759625,-0.009576350926681113,-0.025593647329387348,-0.011458504341450727,1.0,-0.0020292937073078535,0.030407246701888446
target,0.09080601779180715,-0.012641150709966707,0.13304666375157864,-0.0779087218804935,-0.15390119913873612,-0.19976636929150263,0.24629895700789062,0.12244267882212094,0.23774641828315587,0.10814510985482437,0.1161703734475162,0.1403316444499485,0.003451073256152576,0.1514312334625439,0.2199136123036807,0.03624112917284234,-0.002208954040941986,0.25400486323255694,0.18020529304447308,0.32337384792407603,0.2973447406527777,0.16929370922966702,-0.0020292937073078535,1.0,-0.023692119864284135
Age,0.1710648461886182,0.058228774244458686,0.16179142819632944,-0.23361461678438017,0.35079057250867324,0.016294899725588837,0.15945109606670785,0.017746520210085638,0.03369674544450875,0.04042508416794455,0.02020441495744574,0.06420769327026418,0.05866805087264471,0.1530513747670212,0.12176397201297959,0.12789072181374891,-0.12390393683196442,-0.06178380024839474,0.06610852367279324,-0.010574840069471311,0.009610506551183966,0.006716955921569739,0.030407246701888446,-0.023692119864284135,1.0
data/processed/PII_Customer_Personality_Analysis/artifacts/correlation_matrix.png
ADDED
data/processed/PII_Customer_Personality_Analysis/data/2024_08_25_PII_Customer_Personality_Analysis_v0.1.csv
ADDED
The diff for this file is too large to render.
See raw diff
data/processed/PII_Customer_Personality_Analysis/data/corr_matrix_PII_Customer_Personality_Analysis_correlation_matrix.csv
ADDED
@@ -0,0 +1,26 @@
,Education,Marital_Status,Income,Kidhome,Teenhome,Recency,MntWines,MntFruits,MntMeatProducts,MntFishProducts,MntSweetProducts,MntGoldProds,NumDealsPurchases,NumWebPurchases,NumCatalogPurchases,NumStorePurchases,NumWebVisitsMonth,AcceptedCmp3,AcceptedCmp4,AcceptedCmp5,AcceptedCmp1,AcceptedCmp2,Complain,target,Age
Education,1.0,0.00601057971479844,0.12069235890931647,-0.045390101722656215,0.12012771551954766,-0.011418202661844836,0.19788629514026926,-0.08246221470202815,0.03996093529190393,-0.11474748247768826,-0.10727879120827798,-0.09708394083472364,0.026207871865264364,0.08242517900615554,0.06904876324567108,0.06779203319202283,-0.040821622200267235,0.005822767693396697,0.0588849087724573,0.03266911640198275,-0.009741208717866857,0.02147726629373715,-0.05086296084713461,0.09080601779180715,0.1710648461886182
Marital_Status,0.00601057971479844,1.0,0.021352847397029834,-0.021718068362697428,-0.0032431226287101722,0.011137721721606375,0.009228819772649922,0.0005591923135064867,0.02780994113296931,0.03420772518449643,0.01565110733819486,0.0008966465168111868,-0.019293729796295137,0.00011772760451422034,0.014930606044176415,0.002603366103162813,-0.026657557466784578,-0.02623359458981945,0.014274398390713436,0.010256030152237389,-0.015569020204498948,0.018908848090815854,-0.005393690720407015,-0.012641150709966707,0.058228774244458686
Income,0.12069235890931647,0.021352847397029834,1.0,-0.42866900796918467,0.019133378179405063,-0.003969755538429371,0.5786497501367387,0.4308416809908738,0.5846333567663218,0.43887135945164113,0.44074379151936854,0.3259164464972651,-0.08310089573037695,0.38787781129179055,0.5891624419343273,0.5293621402734204,-0.5530880116530972,-0.016174440058630012,0.18440036817458086,0.3359432659885299,0.27681986364264505,0.08754477410485566,-0.02722451231447758,0.13304666375157864,0.16179142819632944
Kidhome,-0.045390101722656215,-0.021718068362697428,-0.42866900796918467,1.0,-0.03986909491269764,0.011492148877606401,-0.49733585804824904,-0.3733961018645468,-0.4392605293302491,-0.3888842203052554,-0.3780261275829725,-0.35502942464114273,0.2169130475866851,-0.3719765493465889,-0.5045006219645888,-0.5013488096627505,0.44747694055447784,0.016066022250294954,-0.16202597120826812,-0.20530460075712562,-0.17416308346026005,-0.08186792980170485,0.040977948951332006,-0.0779087218804935,-0.23361461678438017
Teenhome,0.12012771551954766,-0.0032431226287101722,0.019133378179405063,-0.03986909491269764,1.0,0.013837883216177588,0.003746662683131421,-0.17655763847960287,-0.26112238523373393,-0.2052418665057769,-0.1630557773562015,-0.01988723381366726,0.38624630410345656,0.16207718515997702,-0.11269219774843708,0.04973702442961182,0.13124002195157045,-0.04252154241540535,0.038375727599795045,-0.19079132962891837,-0.14485535030427124,-0.015520861256563333,0.003306980000913191,-0.15390119913873612,0.35079057250867324
Recency,-0.011418202661844836,0.011137721721606375,-0.003969755538429371,0.011492148877606401,0.013837883216177588,1.0,0.015721019423318053,-0.005843749911872586,0.022517635114411027,0.0005509232351481579,0.02510977031031601,0.01766263773496973,0.0021154508116066205,-0.005640853762668498,0.02408140757530815,-0.0004338265887671439,-0.01856364340172459,-0.03225726632386708,0.017566258881346348,-0.0004819996751676512,-0.021061220855675345,-0.0014003820869686575,0.01363667026546777,-0.19976636929150263,0.016294899725588837
MntWines,0.19788629514026926,0.009228819772649922,0.5786497501367387,-0.49733585804824904,0.003746662683131421,0.015721019423318053,1.0,0.3870238608948813,0.5688600028034905,0.3977210502397404,0.39032580211914597,0.3927309933121961,0.008885928846518159,0.5537859390502029,0.6347527405610132,0.6400119079346188,-0.3219779006408591,0.06146322133332154,0.37314333590551424,0.473550447360861,0.3514171077786048,0.20618492919071135,-0.03947021117770349,0.24629895700789062,0.15945109606670785
MntFruits,-0.08246221470202815,0.0005591923135064867,0.4308416809908738,-0.3733961018645468,-0.17655763847960287,-0.005843749911872586,0.3870238608948813,1.0,0.5478221664444243,0.5934310502810902,0.5716060634768749,0.39648692442903605,-0.13451209943613016,0.3020388491308559,0.4862630707589945,0.45849103147873843,-0.4187289323674573,0.014423959637837509,0.006395603242838285,0.21287107479087422,0.1918157630946309,-0.009980152400035486,-0.005324098581870991,0.12244267882212094,0.017746520210085638
MntMeatProducts,0.03996093529190393,0.02780994113296931,0.5846333567663218,-0.4392605293302491,-0.26112238523373393,0.022517635114411027,0.5688600028034905,0.5478221664444243,1.0,0.5735740153436658,0.5351361087117303,0.35944628071880613,-0.12130771413541634,0.30709036563638303,0.7341265978631459,0.4860055452848598,-0.5394844166382349,0.018437950940138954,0.09161819640940018,0.3768671184726933,0.31307611216860326,0.043521399455980485,-0.0237819441070392,0.23774641828315587,0.03369674544450875
MntFishProducts,-0.11474748247768826,0.03420772518449643,0.43887135945164113,-0.3888842203052554,-0.2052418665057769,0.0005509232351481579,0.3977210502397404,0.5934310502810902,0.5735740153436658,1.0,0.5838669550256257,0.4271420401285096,-0.14324108564174431,0.29968751037596947,0.5327567837134197,0.45774504320424736,-0.44642329175818335,-0.00021899209055690604,0.016105384537408233,0.19627745116954454,0.2616081098154486,0.0023448969973614508,-0.02122023035141569,0.10814510985482437,0.04042508416794455
MntSweetProducts,-0.10727879120827798,0.01565110733819486,0.44074379151936854,-0.3780261275829725,-0.1630557773562015,0.02510977031031601,0.39032580211914597,0.5716060634768749,0.5351361087117303,0.5838669550256257,1.0,0.35744974733671053,-0.12143192773267662,0.3339372174806106,0.49513581787229005,0.4552251635983501,-0.4223708035871346,0.0017804341899794044,0.029313011665345452,0.2592298737532875,0.24510196257446312,0.010188061683200663,-0.0226412001817123,0.1161703734475162,0.02020441495744574
MntGoldProds,-0.09708394083472364,0.0008966465168111868,0.3259164464972651,-0.35502942464114273,-0.01988723381366726,0.01766263773496973,0.3927309933121961,0.39648692442903605,0.35944628071880613,0.4271420401285096,0.35744974733671053,1.0,0.05190482939144336,0.40706566619258716,0.44242825214836723,0.3891801722259885,-0.2476905571725933,0.1249578642120278,0.024015092911599267,0.1813973755277105,0.17013156126608653,0.05073361018385326,-0.031133459344139035,0.1403316444499485,0.06420769327026418
NumDealsPurchases,0.026207871865264364,-0.019293729796295137,-0.08310089573037695,0.2169130475866851,0.38624630410345656,0.0021154508116066205,0.008885928846518159,-0.13451209943613016,-0.12130771413541634,-0.14324108564174431,-0.12143192773267662,0.05190482939144336,1.0,0.24144031825434095,-0.012118428034188273,0.06610659381931695,0.3460483799648631,-0.023135079994654903,0.016076520490276074,-0.1842529426068536,-0.12737389187862327,-0.03798115053624632,0.0004972466506366478,0.003451073256152576,0.05866805087264471
NumWebPurchases,0.08242517900615554,0.00011772760451422034,0.38787781129179055,-0.3719765493465889,0.16207718515997702,-0.005640853762668498,0.5537859390502029,0.3020388491308559,0.30709036563638303,0.29968751037596947,0.3339372174806106,0.40706566619258716,0.24144031825434095,1.0,0.3868676401456998,0.5162401826934668,-0.05122626307505021,0.04295782900243499,0.1629322581651734,0.14118889093609427,0.1592916660892605,0.034828595290096596,-0.016641779042421617,0.1514312334625439,0.1530513747670212
NumCatalogPurchases,0.06904876324567108,0.014930606044176415,0.5891624419343273,-0.5045006219645888,-0.11269219774843708,0.02408140757530815,0.6347527405610132,0.4862630707589945,0.7341265978631459,0.5327567837134197,0.49513581787229005,0.44242825214836723,-0.012118428034188273,0.3868676401456998,1.0,0.517840451115637,-0.5220037739848213,0.10434509873303167,0.14018199197446132,0.3224705753164749,0.3090257184513369,0.09991528121453742,-0.0208391906218129,0.2199136123036807,0.12176397201297959
NumStorePurchases,0.06779203319202283,0.002603366103162813,0.5293621402734204,-0.5013488096627505,0.04973702442961182,-0.0004338265887671439,0.6400119079346188,0.45849103147873843,0.4860055452848598,0.45774504320424736,0.4552251635983501,0.3891801722259885,0.06610659381931695,0.5162401826934668,0.517840451115637,1.0,-0.4323982572659749,-0.06891258923172716,0.17802019025920685,0.212953710216086,0.1787428895679233,0.08527077669680684,-0.016940707007434427,0.03624112917284234,0.12789072181374891
NumWebVisitsMonth,-0.040821622200267235,-0.026657557466784578,-0.5530880116530972,0.44747694055447784,0.13124002195157045,-0.01856364340172459,-0.3219779006408591,-0.4187289323674573,-0.5394844166382349,-0.44642329175818335,-0.4223708035871346,-0.2476905571725933,0.3460483799648631,-0.05122626307505021,-0.5220037739848213,-0.4323982572659749,1.0,0.06130723471258901,-0.028665889635463914,-0.2778831014680211,-0.19477318052693973,-0.0073616649308966215,0.01978500588725408,-0.002208954040941986,-0.12390393683196442
AcceptedCmp3,0.005822767693396697,-0.02623359458981945,-0.016174440058630012,0.016066022250294954,-0.04252154241540535,-0.03225726632386708,0.06146322133332154,0.014423959637837509,0.018437950940138954,-0.00021899209055690604,0.0017804341899794044,0.1249578642120278,-0.023135079994654903,0.04295782900243499,0.10434509873303167,-0.06891258923172716,0.06130723471258901,1.0,-0.07965858237443767,0.08024761492969107,0.09568286876259005,0.07170217241720112,0.008124113453170467,0.25400486323255694,-0.06178380024839474
AcceptedCmp4,0.0588849087724573,0.014274398390713436,0.18440036817458086,-0.16202597120826812,0.038375727599795045,0.017566258881346348,0.37314333590551424,0.006395603242838285,0.09161819640940018,0.016105384537408233,0.029313011665345452,0.024015092911599267,0.016076520490276074,0.1629322581651734,0.14018199197446132,0.17802019025920685,-0.028665889635463914,-0.07965858237443767,1.0,0.3113144997868341,0.24278177008858737,0.295049565226579,-0.027651941592759625,0.18020529304447308,0.06610852367279324
AcceptedCmp5,0.03266911640198275,0.010256030152237389,0.3359432659885299,-0.20530460075712562,-0.19079132962891837,-0.0004819996751676512,0.473550447360861,0.21287107479087422,0.3768671184726933,0.19627745116954454,0.2592298737532875,0.1813973755277105,-0.1842529426068536,0.14118889093609427,0.3224705753164749,0.212953710216086,-0.2778831014680211,0.08024761492969107,0.3113144997868341,1.0,0.407877927952029,0.22212082088409418,-0.009576350926681113,0.32337384792407603,-0.010574840069471311
AcceptedCmp1,-0.009741208717866857,-0.015569020204498948,0.27681986364264505,-0.17416308346026005,-0.14485535030427124,-0.021061220855675345,0.3514171077786048,0.1918157630946309,0.31307611216860326,0.2616081098154486,0.24510196257446312,0.17013156126608653,-0.12737389187862327,0.1592916660892605,0.3090257184513369,0.1787428895679233,-0.19477318052693973,0.09568286876259005,0.24278177008858737,0.407877927952029,1.0,0.17663707327744818,-0.025593647329387348,0.2973447406527777,0.009610506551183966
AcceptedCmp2,0.02147726629373715,0.018908848090815854,0.08754477410485566,-0.08186792980170485,-0.015520861256563333,-0.0014003820869686575,0.20618492919071135,-0.009980152400035486,0.043521399455980485,0.0023448969973614508,0.010188061683200663,0.05073361018385326,-0.03798115053624632,0.034828595290096596,0.09991528121453742,0.08527077669680684,-0.0073616649308966215,0.07170217241720112,0.295049565226579,0.22212082088409418,0.17663707327744818,1.0,-0.011458504341450727,0.16929370922966702,0.006716955921569739
Complain,-0.05086296084713461,-0.005393690720407015,-0.02722451231447758,0.040977948951332006,0.003306980000913191,0.01363667026546777,-0.03947021117770349,-0.005324098581870991,-0.0237819441070392,-0.02122023035141569,-0.0226412001817123,-0.031133459344139035,0.0004972466506366478,-0.016641779042421617,-0.0208391906218129,-0.016940707007434427,0.01978500588725408,0.008124113453170467,-0.027651941592759625,-0.009576350926681113,-0.025593647329387348,-0.011458504341450727,1.0,-0.0020292937073078535,0.030407246701888446
target,0.09080601779180715,-0.012641150709966707,0.13304666375157864,-0.0779087218804935,-0.15390119913873612,-0.19976636929150263,0.24629895700789062,0.12244267882212094,0.23774641828315587,0.10814510985482437,0.1161703734475162,0.1403316444499485,0.003451073256152576,0.1514312334625439,0.2199136123036807,0.03624112917284234,-0.002208954040941986,0.25400486323255694,0.18020529304447308,0.32337384792407603,0.2973447406527777,0.16929370922966702,-0.0020292937073078535,1.0,-0.023692119864284135
Age,0.1710648461886182,0.058228774244458686,0.16179142819632944,-0.23361461678438017,0.35079057250867324,0.016294899725588837,0.15945109606670785,0.017746520210085638,0.03369674544450875,0.04042508416794455,0.02020441495744574,0.06420769327026418,0.05866805087264471,0.1530513747670212,0.12176397201297959,0.12789072181374891,-0.12390393683196442,-0.06178380024839474,0.06610852367279324,-0.010574840069471311,0.009610506551183966,0.006716955921569739,0.030407246701888446,-0.023692119864284135,1.0
data/processed/__init__.py
ADDED
File without changes
data/processed/clustering_ml.py
ADDED
@@ -0,0 +1,102 @@
import os

import pandas as pd
import torch
from dotenv import load_dotenv
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import silhouette_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

import wandb

load_dotenv()


device = "mps" if torch.backends.mps.is_available() else "cpu"
# Initialize a W&B run
wandb.login(key=os.getenv("WANDB_API_KEY"))
wandb.init(project="customer_personality_analysis", entity="orionai", name="clustering", job_type="train")

# Load the processed data
df_processed = pd.read_csv('df_processed.csv')

# Train-Test Split
X = df_processed.drop(columns=['target'])
y = df_processed['target']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Log dataset artifact to W&B
dataset_artifact = wandb.Artifact('processed_data', type='dataset')
dataset_artifact.add_file('df_processed.csv')
wandb.log_artifact(dataset_artifact)

# KMeans Clustering
kmeans = KMeans(n_clusters=2, random_state=42)
kmeans.fit(X_train)
kmeans_labels = kmeans.predict(X_test)
kmeans_silhouette = silhouette_score(X_test, kmeans_labels)

# Log KMeans results
wandb.log({"KMeans Silhouette Score": kmeans_silhouette})

# Agglomerative Clustering
agg_clustering = AgglomerativeClustering(n_clusters=2)
agg_labels = agg_clustering.fit_predict(X_test)
agg_silhouette = silhouette_score(X_test, agg_labels)

# Log Agglomerative Clustering results
wandb.log({"Agglomerative Clustering Silhouette Score": agg_silhouette})

# Summary of results
print("\nSummary of Clustering Results:")
print(f"KMeans Silhouette Score: {kmeans_silhouette:.2f}")
print(f"Agglomerative Clustering Silhouette Score: {agg_silhouette:.2f}")

# Log final model metrics and artifacts
wandb.log({
    "kmeans_silhouette_score": kmeans_silhouette,
    "agg_silhouette_score": agg_silhouette
})

# Optional: Save the trained models and log them as artifacts
k_model_artifact = wandb.Artifact('kmeans_model', type='model')
wandb.log_artifact(k_model_artifact)

agg_model_artifact = wandb.Artifact('agg_clustering_model', type='model')
wandb.log_artifact(agg_model_artifact)

# Finish the W&B run
wandb.finish()

# Optional: If you want to run hyperparameter sweeps with W&B
sweep_config = {
    'method': 'random',
    'parameters': {
        'n_clusters': {
            'values': [2, 3, 4, 5]
        }
    }
}

sweep_id = wandb.sweep(sweep_config, project="customer-response-prediction")

def sweep_train():
    # Initialize a W&B run
    with wandb.init() as run:
        config = wandb.config

        # Run KMeans with the sweep's number of clusters
        kmeans_sweep = KMeans(n_clusters=config.n_clusters, random_state=42)
        kmeans_sweep.fit(X_train)
        kmeans_sweep_labels = kmeans_sweep.predict(X_test)
        kmeans_sweep_silhouette = silhouette_score(X_test, kmeans_sweep_labels)

        # Log the results
        wandb.log({"KMeans Silhouette Score": kmeans_sweep_silhouette})

# Run the sweep
wandb.agent(sweep_id, sweep_train)
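Note that the two model artifacts above are logged without any files attached, so they arrive in W&B empty. A minimal sketch of persisting the fitted estimators first, assuming the kmeans and agg_clustering objects from clustering_ml.py; joblib is the usual serializer for scikit-learn models, and the filenames are illustrative assumptions:

import joblib
import wandb

# Serialize the fitted estimators (hypothetical filenames).
joblib.dump(kmeans, "kmeans_model.pkl")
joblib.dump(agg_clustering, "agg_clustering_model.pkl")

# Attach the files before logging so the artifacts are not empty.
k_model_artifact = wandb.Artifact("kmeans_model", type="model")
k_model_artifact.add_file("kmeans_model.pkl")
wandb.log_artifact(k_model_artifact)

agg_model_artifact = wandb.Artifact("agg_clustering_model", type="model")
agg_model_artifact.add_file("agg_clustering_model.pkl")
wandb.log_artifact(agg_model_artifact)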
data/processed/customer_profile_marketing.csv
ADDED
The diff for this file is too large to render.
See raw diff
data/processed/eda.py.bak
ADDED
@@ -0,0 +1,104 @@
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.pipeline import Pipeline
from sklearn.metrics import silhouette_score

# Load the data
file_path = 'customer_profile_marketing.csv'
df = pd.read_csv(file_path)

# Initial Analysis
print("Initial DataFrame Info:")
df.info()
print("\nInitial DataFrame Head:")
print(df.head())

# Drop irrelevant columns
df = df.drop(columns=['Unnamed: 0', 'ID', 'Dt_Customer', 'Z_CostContact', 'Z_Revenue'])

# Handle missing values (if any)
print(f"Missing Values:\n{df.isna().sum()}")
df = df.dropna()

# Feature Engineering
# Calculate age from 'Year_Birth'
df['Age'] = 2024 - df['Year_Birth']  # Assuming the current year is 2024
df = df.drop(columns=['Year_Birth'])

# Convert categorical variables to numerical format using Label Encoding
label_encoder = LabelEncoder()
df['Education'] = label_encoder.fit_transform(df['Education'])
df['Marital_Status'] = label_encoder.fit_transform(df['Marital_Status'])

df.to_csv("./data_pre_processed_v1")
# Correlation Analysis
corr_matrix = df.corr()
plt.figure(figsize=(16, 12))
sns.heatmap(corr_matrix, annot=True, fmt='.2f', cmap='coolwarm', square=True)
plt.title('Correlation Matrix')
plt.show()

# Feature Selection using SelectKBest
X = df.drop(columns=['Response'])  # Features
y = df['Response']  # Target variable

# Select the top 10 features
selector = SelectKBest(score_func=f_classif, k=10)
X_new = selector.fit_transform(X, y)

# Get the columns selected
selected_features = X.columns[selector.get_support()]
print("Selected Features:")
print(selected_features)

# Drop unimportant features
df_processed = df[selected_features].copy()  # .copy() avoids SettingWithCopyWarning when adding 'Response' below
df_processed['Response'] = y

# Normalize the relevant numerical features
scaler = StandardScaler()
numerical_cols = df_processed.select_dtypes(include=[np.number]).columns.tolist()
numerical_cols.remove('Response')  # Remove the target variable from the list

df_processed[numerical_cols] = scaler.fit_transform(df_processed[numerical_cols])

# Encoding categorical variables (already encoded with LabelEncoder)
# No additional encoding is necessary if the categorical columns have been encoded

# Train-Test Split
X_train, X_test, y_train, y_test = train_test_split(df_processed.drop(columns=['Response']),
                                                    df_processed['Response'],
                                                    test_size=0.3,
                                                    random_state=42)

# Clustering Algorithms
# 1. KMeans
kmeans = KMeans(n_clusters=2, random_state=42)
kmeans.fit(X_train)
kmeans_labels = kmeans.predict(X_test)
kmeans_silhouette = silhouette_score(X_test, kmeans_labels)
print(f"KMeans Silhouette Score: {kmeans_silhouette:.2f}")

# 2. Agglomerative Clustering
agg_clustering = AgglomerativeClustering(n_clusters=2)
agg_labels = agg_clustering.fit_predict(X_test)
agg_silhouette = silhouette_score(X_test, agg_labels)
print(f"Agglomerative Clustering Silhouette Score: {agg_silhouette:.2f}")

# Summary of Results
print("\nSummary of Clustering Results:")
print(f"KMeans Silhouette Score: {kmeans_silhouette:.2f}")
print(f"Agglomerative Clustering Silhouette Score: {agg_silhouette:.2f}")

# Optional: If you want to use a pipeline for scaling and clustering together
# pipeline = Pipeline([('scaler', StandardScaler()), ('kmeans', KMeans(n_clusters=2, random_state=42))])
# pipeline.fit(X_train)
# pipeline_labels = pipeline.predict(X_test)
# pipeline_silhouette = silhouette_score(X_test, pipeline_labels)
# print(f"Pipeline KMeans Silhouette Score: {pipeline_silhouette:.2f}")
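The commented-out pipeline at the end of eda.py.bak is worth spelling out: fitting the scaler and KMeans as one Pipeline keeps the scaler fit on X_train only, so no test-set statistics leak into the clustering. A runnable sketch, assuming the same X_train/X_test split as the script above:

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Scaling and clustering fitted as a single unit.
pipeline = Pipeline([('scaler', StandardScaler()),
                     ('kmeans', KMeans(n_clusters=2, random_state=42))])
pipeline.fit(X_train)
pipeline_labels = pipeline.predict(X_test)
pipeline_silhouette = silhouette_score(X_test, pipeline_labels)
print(f"Pipeline KMeans Silhouette Score: {pipeline_silhouette:.2f}")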
data/processed/eda_code_final_fixed.py
ADDED
@@ -0,0 +1,128 @@
import os
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from dotenv import load_dotenv
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import LabelEncoder, StandardScaler

import wandb
from constants import project, version

load_dotenv()

# Log in to W&B
wandb.login(key=os.getenv("WANDB_API_KEY"))

# Initialize W&B run
wandb.init(project=project, entity="orionai", name="PII_customer_relationship", job_type="dataset")

# Create the project directory tree (exist_ok=True tolerates a partially existing tree)
os.makedirs(f'./{project}/artifacts/', exist_ok=True)
os.makedirs(f'./{project}/data/', exist_ok=True)

print("Loading data...")
# Load the data
file_path = 'customer_profile_marketing.csv'
df = pd.read_csv(file_path)
df.rename(columns={'Response': 'target'}, inplace=True)

# Log the raw data as a W&B artifact
raw_data_artifact = wandb.Artifact('customer_profile_marketing_raw', type='dataset')
raw_data_artifact.add_file(file_path)
wandb.log_artifact(raw_data_artifact)

# Drop irrelevant columns
df = df.drop(columns=['Unnamed: 0', 'ID', 'Dt_Customer', 'Z_CostContact', 'Z_Revenue'])

print("Cleaning Data...")
# Handle missing values (if any)
df_copy = df.copy()
df = df.dropna()
print("Dropped missing values...")

# Log a table with the cleaned data (before feature engineering)
wandb.log({"cleaned_data": wandb.Table(dataframe=df)})

# Feature Engineering
df['Age'] = 2024 - df['Year_Birth']  # Assuming the current year is 2024
df = df.drop(columns=['Year_Birth'])

print("Converting categorical variables...")
# Convert categorical variables to numerical format using LabelEncoder
label_encoder = LabelEncoder()
df['Education'] = label_encoder.fit_transform(df['Education'])
df['Marital_Status'] = label_encoder.fit_transform(df['Marital_Status'])
# Log a table with the data after feature engineering
wandb.log({"feature_engineered_data": wandb.Table(dataframe=df)})


# Splitting data (train_test_split in model training files)
df_features = df.copy()
y = df_features['target']  # Target variable
X = df_features.drop(columns=['target'])  # Features

print("Normalising numerical values...")

# Normalize the relevant numerical features
scaler = StandardScaler()
if y.values.any():
    numerical_cols = X.select_dtypes(include=[np.number]).columns.tolist()

    df_transform = scaler.fit_transform(X[numerical_cols])
    df_transform = pd.DataFrame(df_transform, columns=numerical_cols)  # keep the column names

    # Log the processed data table
    pc_artifact = wandb.Artifact("processed_data", type="dataset")
    wandb.log_artifact(pc_artifact)
    wandb.log({"processed_data": wandb.Table(dataframe=df_transform)})

    print("beginning encoding...")
    pre_encoding_path = f"./{project}/data/df_pre_encoding_{project}.csv"
    # Log the processed data as a W&B artifact
    df_transform.to_csv(pre_encoding_path, index=False)  # to_csv returns None when given a path
    processed_data_artifact = wandb.Artifact("pre-encoding-file", type='dataset')
    processed_data_artifact.add_file(pre_encoding_path)
    wandb.log_artifact(processed_data_artifact)

# Correlation Analysis
corr_matrix = df.corr()
plt.figure(figsize=(16, 12))
sns.heatmap(corr_matrix, annot=True, fmt='.2f', cmap='coolwarm', square=True)
plt.title('Correlation Matrix')
#plt.show()
corr_matrix_str = corr_matrix.to_string()
print(corr_matrix_str)
corr_matrix_plot_path = f"./{project}/artifacts/"
corr_matrix.to_csv(f"./{project}/artifacts/corr_matrix_{project}_correlation_matrix.csv", index=True)
wandb.log({"corr_matrix": wandb.Table(dataframe=corr_matrix)})  # wrap the DataFrame in a Table so W&B can log it


# Step 3: Log the Correlation Matrix as an Artifact in W&B
art_path = f"./{project}/artifacts/corr_matrix_{project}_"
artifact = wandb.Artifact(name='correlation_matrix', type='dataset', description="Correlation matrix of the dataset")
artifact.add_file(f"./{project}/artifacts/corr_matrix_{project}_correlation_matrix.csv")
wandb.log_artifact(artifact)

# Optional: Visualize the Correlation Matrix
plt.figure(figsize=(16, 12))
sns.heatmap(corr_matrix, annot=True, fmt='.2f', cmap='coolwarm', square=True)
plt.title('Correlation Matrix After Encoding')
plt.show()

print("Finishing WandB...")
if wandb.run is not None:
    final_csv_path = f"{project}/data/{datetime.now().strftime('%Y_%m_%d')}_{project}_v{version}.csv"
    df_transform.to_csv(final_csv_path, index=True)
    final_data_artifact = wandb.Artifact("final_data", type='dataset')
    final_data_artifact.add_file(final_csv_path)  # add the path; to_csv itself returns None
    wandb.log_artifact(final_data_artifact)
    wandb.finish()
print("ALL DONE...")
data/processed/supervized_ml.py
ADDED
@@ -0,0 +1,195 @@
import json
import os
import random
from typing import Any, Dict, Tuple

import pandas as pd
import torch
from dotenv import load_dotenv
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, f1_score, roc_auc_score)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

import wandb

# Load environment variables
load_dotenv()

# Load project details (NB: importing from eda_code_final_fixed executes that script;
# the same values are available from constants.py)
from eda_code_final_fixed import project, version


def initialize_project(project: str, version: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.Series, pd.Series, Dict[str, Any]]:
    """
    Initializes a project and performs a train-test split on the processed data.

    Parameters:
        project (str): The name of the project.
        version (str): The version of the project.

    Returns:
        tuple: A tuple containing the following:
            - X_train (pd.DataFrame): The training features.
            - X_test (pd.DataFrame): The testing features.
            - y_train (pd.Series): The training targets.
            - y_test (pd.Series): The testing targets.
            - models (dict): A dictionary of model instances.
    """
    data_path = "/Users/nullzero/Documents/repos/github.com/privacy-identity/vda-simulation-medical/vda-sim-medical/data/processed/PII_Customer_Personality_Analysis/data/2024_08_25_PII_Customer_Personality_Analysis_v0.1.csv"

    # Load the processed data
    df_processed = pd.read_csv(data_path)

    # Train-Test Split
    X = df_processed.drop(columns=['target'])
    y = df_processed['target']

    # Select the top 10 features
    selector = SelectKBest(score_func=f_classif, k=10)
    X_new = selector.fit_transform(X, y)
    selected_features = X.columns[selector.get_support()]
    X = pd.DataFrame(X_new, columns=selected_features)

    # Log the selected features to W&B
    wandb.init(project=project, entity="orionai", name="supervized_binary_classification", job_type="supervized_train")
    wandb.log({"selected_features": selected_features.tolist()})

    # Normalize the data
    scaler = StandardScaler()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # Define models
    models = {
        "Logistic Regression": LogisticRegression(random_state=42, max_iter=1000),
        "Random Forest": RandomForestClassifier(random_state=42, n_estimators=100),
        "SVM": SVC(random_state=42, probability=True)
    }

    return X_train, X_test, y_train, y_test, models


def training_clf(X_train: pd.DataFrame, X_test: pd.DataFrame, y_train: pd.Series, y_test: pd.Series, models: Dict[str, Any], project: str, version: str) -> Dict[str, Any]:
    """
    Trains and logs multiple classification models using Weights & Biases (W&B).

    Args:
        X_train (pd.DataFrame): The training features.
        X_test (pd.DataFrame): The testing features.
        y_train (pd.Series): The training targets.
        y_test (pd.Series): The testing targets.
        models (dict): A dictionary of classification models to train and log.
        project (str): The W&B project name.
        version (str): The model version.

    Returns:
        dict: A dictionary containing the model name, classification report, confusion matrix, accuracy, ROC AUC, and F1 score for each model.
    """
    results = {}

    for model_name, model in models.items():
        # Initialize a new W&B run for each model
        run = wandb.init(project=project, entity="orionai", job_type="supervized_train", name=model_name)

        # Train the model
        model.fit(X_train, y_train)

        # Predict and evaluate
        y_pred = model.predict(X_test)
        y_prob = model.predict_proba(X_test)[:, 1] if hasattr(model, "predict_proba") else None
        accuracy = accuracy_score(y_test, y_pred)
        roc_auc = roc_auc_score(y_test, y_prob) if y_prob is not None else None
        f1_metric = f1_score(y_test, y_pred)

        # Log metrics
        wandb.log({
            "accuracy": accuracy,
            "roc_auc": roc_auc,
            "f1_score": f1_metric
        })

        # Log model
        wandb.sklearn.plot_classifier(model, X_train, X_test, y_train, y_test, y_pred, y_prob, labels=["Not Buy", "Buy"])

        # Save the model to a file
        model_filename = f"{model_name.replace(' ', '_').lower()}_model_v{version}.pkl"
        torch.save(model, model_filename)

        # Create and log the W&B artifact for the model
        model_artifact = wandb.Artifact(name=f"{model_name.replace(' ', '_').lower()}_v{version}", type='model')
        model_artifact.add_file(model_filename)
        wandb.log_artifact(model_artifact)

        # Log classification report and confusion matrix
        class_report = classification_report(y_test, y_pred, output_dict=True)
        conf_matrix = confusion_matrix(y_test, y_pred)

        wandb.log({
            "classification_report": class_report,
            "confusion_matrix": conf_matrix
        })

        results[model_name] = {
            "clf_report": class_report,
            "conf_matrix": conf_matrix.tolist(),  # ndarrays are not JSON-serializable; json_convert needs a list
            "accuracy": accuracy,
            "roc_auc": roc_auc,
            "f1_score": f1_metric
        }

        # End W&B run for this model
        run.finish()

    return results


def json_convert(input_dict: Dict[str, Any], project: str) -> str:
    """
    Converts a dictionary into a JSON file and saves it to a specified directory.

    Args:
        input_dict (dict): The dictionary to be converted into a JSON file.
        project (str): The name of the project for directory organization.

    Returns:
        str: The file path where the JSON file is saved.
    """
    # Ensure the folder exists
    folder_path = f"../data/{project}/results/"
    os.makedirs(folder_path, exist_ok=True)

    file_name = f"{project}_supervized_v{random.randint(1, 100)}.json"
    file_path = os.path.join(folder_path, file_name)

    with open(file_path, 'w') as json_file:
        json.dump(input_dict, json_file, indent=4)

    print(f"Results saved to {file_path}")

    return file_path


def main():
    device = "mps" if torch.backends.mps.is_available() else "cpu"

    print("Initializing project...")
    X_train, X_test, y_train, y_test, models = initialize_project(project, version)

    print("Training classifiers...")
    clf_train_results = training_clf(X_train, X_test, y_train, y_test, models, project, version)

    print("Saving results to JSON...")
    json_convert(clf_train_results, project)

    print("Finished.")


if __name__ == '__main__':
    main()
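A note on model persistence in training_clf: torch.save does pickle a scikit-learn estimator, but reloading then requires torch; joblib is the serializer the scikit-learn docs recommend for fitted models. A minimal sketch of the swap, reusing the script's filename pattern (not part of the commit):

import joblib

# Drop-in replacement for torch.save(model, model_filename) inside the loop:
model_filename = f"{model_name.replace(' ', '_').lower()}_model_v{version}.pkl"
joblib.dump(model, model_filename)

# Reloading later needs only joblib:
restored = joblib.load(model_filename)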
data/raw/customer_profile_eda_normalization.ipynb
ADDED
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": null,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"import pandas as pd\n"
|
10 |
+
]
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"cell_type": "code",
|
14 |
+
"execution_count": null,
|
15 |
+
"metadata": {},
|
16 |
+
"outputs": [],
|
17 |
+
"source": [
|
18 |
+
"from datasets import load_dataset\n",
|
19 |
+
"df_raw = load_dataset('Ezi/medical_and_legislators_synthetic')\n",
|
20 |
+
"df_pandas = df_raw['train'].to_pandas()\n",
|
21 |
+
"df_pandas.columns"
|
22 |
+
]
|
23 |
+
},
|
24 |
+
{
|
25 |
+
"cell_type": "code",
|
26 |
+
"execution_count": null,
|
27 |
+
"metadata": {},
|
28 |
+
"outputs": [],
|
29 |
+
"source": [
|
30 |
+
"df_raw = pd.read_csv('/Users/nullzero/Documents/repos/github.com/privacy-identity/vda-simulation-medical/vda-sim-medical/data/raw/marketing_campaign.csv', sep='\\t')\n",
|
31 |
+
"df_raw.shape\n",
|
32 |
+
"df_raw[\"Response\"].unique()\n",
|
33 |
+
"df_raw.rename(columns={'Reponse': 'target'}, inplace=True)"
|
34 |
+
]
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"cell_type": "code",
|
38 |
+
"execution_count": null,
|
39 |
+
"metadata": {},
|
40 |
+
"outputs": [],
|
41 |
+
"source": [
|
42 |
+
"df = df_raw.copy()"
|
43 |
+
]
|
44 |
+
},
|
45 |
+
{
|
46 |
+
"cell_type": "code",
|
47 |
+
"execution_count": null,
|
48 |
+
"metadata": {},
|
49 |
+
"outputs": [],
|
50 |
+
"source": [
|
51 |
+
"print(df.describe) "
|
52 |
+
]
|
53 |
+
},
|
54 |
+
{
|
55 |
+
"cell_type": "code",
|
56 |
+
"execution_count": 122,
|
57 |
+
"metadata": {},
|
58 |
+
"outputs": [
|
59 |
+
{
|
60 |
+
"name": "stdout",
|
61 |
+
"output_type": "stream",
|
62 |
+
"text": [
|
63 |
+
"shape is (2240, 29) and there are ID 0\n",
|
64 |
+
"Year_Birth 0\n",
|
65 |
+
"Education 0\n",
|
66 |
+
"Marital_Status 0\n",
|
67 |
+
"Income 24\n",
|
68 |
+
"Kidhome 0\n",
|
69 |
+
"Teenhome 0\n",
|
70 |
+
"Dt_Customer 0\n",
|
71 |
+
"Recency 0\n",
|
72 |
+
"MntWines 0\n",
|
73 |
+
"MntFruits 0\n",
|
74 |
+
"MntMeatProducts 0\n",
|
75 |
+
"MntFishProducts 0\n",
|
76 |
+
"MntSweetProducts 0\n",
|
77 |
+
"MntGoldProds 0\n",
|
78 |
+
"NumDealsPurchases 0\n",
|
79 |
+
"NumWebPurchases 0\n",
|
80 |
+
"NumCatalogPurchases 0\n",
|
81 |
+
"NumStorePurchases 0\n",
|
82 |
+
"NumWebVisitsMonth 0\n",
|
83 |
+
"AcceptedCmp3 0\n",
|
84 |
+
"AcceptedCmp4 0\n",
|
85 |
+
"AcceptedCmp5 0\n",
|
86 |
+
"AcceptedCmp1 0\n",
|
87 |
+
"AcceptedCmp2 0\n",
|
88 |
+
"Complain 0\n",
|
89 |
+
"Z_CostContact 0\n",
|
90 |
+
"Z_Revenue 0\n",
|
91 |
+
"Response 0\n",
|
92 |
+
"dtype: int64 NAs\n",
|
93 |
+
"Index(['ID', 'Year_Birth', 'Education', 'Marital_Status', 'Income', 'Kidhome',\n",
|
94 |
+
" 'Teenhome', 'Dt_Customer', 'Recency', 'MntWines', 'MntFruits',\n",
|
95 |
+
" 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts',\n",
|
96 |
+
" 'MntGoldProds', 'NumDealsPurchases', 'NumWebPurchases',\n",
|
97 |
+
" 'NumCatalogPurchases', 'NumStorePurchases', 'NumWebVisitsMonth',\n",
|
98 |
+
" 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'AcceptedCmp1',\n",
|
99 |
+
" 'AcceptedCmp2', 'Complain', 'Z_CostContact', 'Z_Revenue', 'Response'],\n",
|
100 |
+
" dtype='object')\n"
|
101 |
+
]
|
102 |
+
}
|
103 |
+
],
|
104 |
+
"source": [
|
105 |
+
"print(f\"shape is {df.shape} and there are {df.isna().sum()} NAs\")\n",
|
106 |
+
"print(df.columns)\n",
|
107 |
+
"#columns = [\"Unnamed: 0\", \"suffix\", \"nickname, \"]"
|
108 |
+
]
|
109 |
+
},
|
110 |
+
{
|
111 |
+
"cell_type": "code",
|
112 |
+
"execution_count": 124,
|
113 |
+
"metadata": {},
|
114 |
+
"outputs": [],
|
115 |
+
"source": [
|
116 |
+
"df.to_csv(\"./customer_profile_marketing.csv\")"
|
117 |
+
]
|
118 |
+
},
|
119 |
+
{
|
120 |
+
"cell_type": "code",
|
121 |
+
"execution_count": null,
|
122 |
+
"metadata": {},
|
123 |
+
"outputs": [],
|
124 |
+
"source": [
|
125 |
+
"df.head(10)"
|
126 |
+
]
|
127 |
+
},
|
128 |
+
{
|
129 |
+
"cell_type": "code",
|
130 |
+
"execution_count": null,
|
131 |
+
"metadata": {},
|
132 |
+
"outputs": [],
|
133 |
+
"source": [
|
134 |
+
"from sklearn.model_selection import train_test_split\n",
|
135 |
+
"from sklearn.cluster import KMeans, AgglomerativeClustering\n",
|
136 |
+
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
|
137 |
+
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
|
138 |
+
"import seaborn as sns\n"
|
139 |
+
]
|
140 |
+
},
|
141 |
+
{
|
142 |
+
"cell_type": "code",
|
143 |
+
"execution_count": null,
|
144 |
+
"metadata": {},
|
145 |
+
"outputs": [],
|
146 |
+
"source": [
|
147 |
+
"import pandas as pd\n",
|
148 |
+
"import numpy as np\n",
|
149 |
+
"import seaborn as sns\n",
|
150 |
+
"import matplotlib.pyplot as plt\n",
|
151 |
+
"from datetime import datetime\n",
|
152 |
+
"\n",
|
153 |
+
"# Drop irrelevant columns\n",
|
154 |
+
"df_bak = df.copy()\n",
|
155 |
+
"df = df_bak\n",
|
156 |
+
"\n",
|
157 |
+
"# Handle missing values\n",
|
158 |
+
"df['middle_name'].fillna('None', inplace=True)\n",
|
159 |
+
"df['suffix'].fillna('None', inplace=True)\n",
|
160 |
+
"\n",
|
161 |
+
"# Feature engineering - calculate age\n",
|
162 |
+
"df['birthday'] = pd.to_datetime(df['birthday'])\n",
|
163 |
+
"#df['age'] = (pd.Timestamp.now() - df['birthday']).astype('<m8[ns]')\n",
|
164 |
+
"\n",
|
165 |
+
"# Categorical encoding\n",
|
166 |
+
"df = pd.get_dummies(df, columns=['gender', 'party', 'state', 'type'])\n",
|
167 |
+
"\n",
|
168 |
+
"# Drop columns with all missing values (like washington_post_id.1)\n",
|
169 |
+
"df.dropna(axis=1, how='all', inplace=True)\n",
|
170 |
+
"\n",
|
171 |
+
"# Chart age before normalization\n",
|
172 |
+
"sns.boxplot(x=df['age'])\n",
|
173 |
+
"plt.title('Boxplot of Age to Detect Outliers')\n",
|
174 |
+
"plt.show()\n",
|
175 |
+
"\n",
|
176 |
+
"# Remove outliers for the age column (optional, if outliers are detected)\n",
|
177 |
+
"q_low = df['age'].quantile(0.1)\n",
|
178 |
+
"q_high = df['age'].quantile(0.90)\n",
|
179 |
+
"#df_filtered = df[(df['age'] > q_low) & (df['age'] < q_high)]\n",
|
180 |
+
"\n",
|
181 |
+
"# Visualization - Distribution of Age with limited bins\n",
|
182 |
+
"sns.histplot(df['age']) # Reduced bins for better performance\n",
|
183 |
+
"plt.title('Distribution of Age')\n",
|
184 |
+
"plt.xlabel('Age')\n",
|
185 |
+
"plt.ylabel('Frequency')\n",
|
186 |
+
"plt.show()\n",
|
187 |
+
"\n",
|
188 |
+
"\n",
|
189 |
+
"# Normalize continuous variables\n"
|
190 |
+
]
|
191 |
+
},
|
192 |
+
{
|
193 |
+
"cell_type": "code",
|
194 |
+
"execution_count": null,
|
195 |
+
"metadata": {},
|
196 |
+
"outputs": [],
|
197 |
+
"source": [
|
198 |
+
"import pandas as pd\n",
|
199 |
+
"import numpy as np\n",
|
200 |
+
"from sklearn.preprocessing import StandardScaler, LabelEncoder\n",
|
201 |
+
"from datetime import datetime\n",
|
202 |
+
"\n",
|
203 |
+
"# Drop irrelevant columns and duplicates\n",
|
204 |
+
"critical_columns = ['last_name', 'first_name', 'age', 'gender', 'state', 'district', \\\n",
|
205 |
+
" 'party', 'twitter', 'facebook', 'youtube']\n",
|
206 |
+
"\n",
|
207 |
+
"# Handle missing values\n",
|
208 |
+
"df['middle_name'].fillna('None', inplace=True)\n",
|
209 |
+
"df['suffix'].fillna('None', inplace=True)\n",
|
210 |
+
"print(\"Suffix and middle name filled with 'None'\")\n",
|
211 |
+
"\n",
|
212 |
+
"# Feature engineering - calculate age\n",
|
213 |
+
"\n",
|
214 |
+
"# Ensure 'birthday' is in datetime format\n",
|
215 |
+
"df['birthday'] = pd.to_datetime(df['birthday']).dt.floor('D')\n",
|
216 |
+
"\n",
|
217 |
+
"# Calculate the current date\n",
|
218 |
+
"today = pd.Timestamp.now().normalize()\n",
|
219 |
+
"\n",
|
220 |
+
"# Calculate age in years\n",
|
221 |
+
"df['age'] = today.year - df['birthday'].dt.year\n",
|
222 |
+
"print(df['age'][0:3])\n",
|
223 |
+
"# Select only critical columns\n",
|
224 |
+
"df_reduced = df.copy()\n",
|
225 |
+
"df_reduced = df[critical_columns]\n",
|
226 |
+
"print(df_reduced.columns)\n"
|
227 |
+
]
|
228 |
+
},
|
229 |
+
{
|
230 |
+
"cell_type": "code",
|
231 |
+
"execution_count": null,
|
232 |
+
"metadata": {},
|
233 |
+
"outputs": [],
|
234 |
+
"source": [
|
235 |
+
"\n",
|
236 |
+
"# Categorical encoding with LabelEncoder for binary categories\n",
|
237 |
+
"label_encoder = LabelEncoder()\n",
|
238 |
+
"df_reduced['gender'] = label_encoder.fit_transform(df_reduced['gender'])\n",
|
239 |
+
"\n",
|
240 |
+
"# Categorical encoding with OneHotEncoder for multi-class categories\n",
|
241 |
+
"df_reduced = pd.get_dummies(df_reduced, columns=['state', 'party', 'district'])\n",
|
242 |
+
"\n",
|
243 |
+
"# Handle online presence as binary features (e.g., presence/absence of account)\n",
|
244 |
+
"df_reduced['twitter'] = df_reduced['twitter'].notna().astype(int)\n",
|
245 |
+
"df_reduced['facebook'] = df_reduced['facebook'].notna().astype(int)\n",
|
246 |
+
"df_reduced['youtube'] = df_reduced['youtube'].notna().astype(int)\n",
|
247 |
+
"\n",
|
248 |
+
"# Normalize continuous variables\n",
|
249 |
+
"scaler = StandardScaler()\n",
|
250 |
+
"df_reduced[['age']] = scaler.fit_transform(df_reduced[['age']])\n",
|
251 |
+
"\n",
|
252 |
+
"# Visualization of the reduced dataframe\n",
|
253 |
+
"print(\"Reduced DataFrame:\")\n",
|
254 |
+
"print(df_reduced.head())\n",
|
255 |
+
"\n",
|
256 |
+
"# Correlation matrix to understand the relationship between features\n",
|
257 |
+
"import seaborn as sns\n",
|
258 |
+
"import matplotlib.pyplot as plt\n"
|
259 |
+
]
|
260 |
+
},
|
261 |
+
{
|
262 |
+
"cell_type": "code",
|
263 |
+
"execution_count": null,
|
264 |
+
"metadata": {},
|
265 |
+
"outputs": [],
|
266 |
+
"source": [
|
267 |
+
"\n",
|
268 |
+
"# Missing data visualization\n",
|
269 |
+
"plt.figure(figsize=(12,8))\n",
|
270 |
+
"sns.heatmap(df.isnull(), cbar=False, cmap='viridis')\n",
|
271 |
+
"plt.title('Missing Data Heatmap')\n",
|
272 |
+
"plt.show()\n",
|
273 |
+
"\n",
|
274 |
+
"# Categorical distribution of gender\n",
|
275 |
+
"sns.countplot(x='gender_M', data=df)\n",
|
276 |
+
"plt.title('Distribution of Gender')\n",
|
277 |
+
"plt.xlabel('Gender (1 = Male, 0 = Female)')\n",
|
278 |
+
"plt.ylabel('Count')\n",
|
279 |
+
"plt.show()\n",
|
280 |
+
"\n",
|
281 |
+
"# Distribution by Party\n",
|
282 |
+
"sns.countplot(x='party', data=df)\n",
|
283 |
+
"plt.title('Distribution by Party')\n",
|
284 |
+
"plt.xlabel('Party')\n",
|
285 |
+
"plt.ylabel('Count')\n",
|
286 |
+
"plt.show()\n",
|
287 |
+
"\n",
|
288 |
+
"plt.figure(figsize=(12,8))\n",
|
289 |
+
"sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap='coolwarm')\n",
|
290 |
+
"plt.title('Correlation Matrix')\n",
|
291 |
+
"plt.show()\n"
|
292 |
+
]
|
293 |
+
},
|
294 |
+
{
|
295 |
+
"cell_type": "code",
|
296 |
+
"execution_count": null,
|
297 |
+
"metadata": {},
|
298 |
+
"outputs": [],
|
299 |
+
"source": [
|
300 |
+
"\n"
|
301 |
+
]
|
302 |
+
},
|
303 |
+
{
|
304 |
+
"cell_type": "code",
|
305 |
+
"execution_count": null,
|
306 |
+
"metadata": {},
|
307 |
+
"outputs": [],
|
308 |
+
"source": []
|
309 |
+
}
|
310 |
+
],
|
311 |
+
"metadata": {
|
312 |
+
"kernelspec": {
|
313 |
+
"display_name": "Python 3 (ipykernel)",
|
314 |
+
"language": "python",
|
315 |
+
"name": "python3"
|
316 |
+
},
|
317 |
+
"language_info": {
|
318 |
+
"codemirror_mode": {
|
319 |
+
"name": "ipython",
|
320 |
+
"version": 3
|
321 |
+
},
|
322 |
+
"file_extension": ".py",
|
323 |
+
"mimetype": "text/x-python",
|
324 |
+
"name": "python",
|
325 |
+
"nbconvert_exporter": "python",
|
326 |
+
"pygments_lexer": "ipython3",
|
327 |
+
"version": "3.11.9"
|
328 |
+
}
|
329 |
+
},
|
330 |
+
"nbformat": 4,
|
331 |
+
"nbformat_minor": 2
|
332 |
+
}
|
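A minimal sketch of the preprocessing pattern the notebook above spreads across several cells: LabelEncoder for binary categories, get_dummies for multi-class ones, StandardScaler for continuous columns. The toy frame and its values below are illustrative assumptions, not data from the commit.

import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Illustrative frame (assumed values; mirrors the notebook's column kinds)
df = pd.DataFrame({
    "gender": ["M", "F", "F", "M"],     # binary category
    "state": ["CA", "NY", "CA", "TX"],  # multi-class category
    "age": [34, 51, 29, 46],            # continuous
})

df["gender"] = LabelEncoder().fit_transform(df["gender"])    # binary -> 0/1 codes
df = pd.get_dummies(df, columns=["state"])                   # multi-class -> one-hot columns
df[["age"]] = StandardScaler().fit_transform(df[["age"]])    # zero mean, unit variance
print(df.head())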
data/raw/data_notes.md
ADDED
@@ -0,0 +1,11 @@
1 |
+
# Verida + PyTorch Differential Privacy Experiment
|
2 |
+
|
3 |
+
# Dataset:
|
4 |
+
https://huggingface.co/datasets/Ezi/medical_and_legislators_synthetic
|
5 |
+
|
6 |
+
# Objective:
|
7 |
+
|
8 |
+
#
|
9 |
+
|
10 |
+
# Notes:
|
11 |
+
1. Appended with synthetically generated VDA DiD
|
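A minimal sketch of note 1, appending a did:key identifier to each record with didkit. The generate_did helper matches the one in data/raw/raw_data.ipynb; the toy frame here is an assumption for illustration.

import didkit
import pandas as pd

def generate_did():
    key = didkit.generate_ed25519_key()   # fresh Ed25519 key as a JWK string
    return didkit.key_to_did("key", key)  # did:key:... identifier

data_df = pd.DataFrame({"last_name": ["Doe", "Roe"]})  # illustrative rows (assumed)
data_df["did"] = [generate_did() for _ in range(len(data_df))]
print(data_df)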
data/raw/hf/.gitattributes
ADDED
@@ -0,0 +1,58 @@
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
26 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
27 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
37 |
+
# Audio files - uncompressed
|
38 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
39 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
40 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
41 |
+
# Audio files - compressed
|
42 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
43 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
44 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
45 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
46 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
47 |
+
# Image files - uncompressed
|
48 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
49 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
50 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
51 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
52 |
+
# Image files - compressed
|
53 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
+
# Video files - compressed
|
57 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
58 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
data/raw/hf/README.md
CHANGED
@@ -0,0 +1,55 @@
1 |
+
---
|
2 |
+
annotations_creators:
|
3 |
+
- machine-generated
|
4 |
+
language:
|
5 |
+
- en
|
6 |
+
language_creators:
|
7 |
+
- machine-generated
|
8 |
+
license:
|
9 |
+
- mit
|
10 |
+
multilinguality:
|
11 |
+
- monolingual
|
12 |
+
pretty_name: Medical data with randomly generated DiDs
|
13 |
+
size_categories:
|
14 |
+
- n<1K
|
15 |
+
source_datasets:
|
16 |
+
- original
|
17 |
+
tags: []
|
18 |
+
task_categories:
|
19 |
+
- tabular-classification
|
20 |
+
- text-classification
|
21 |
+
task_ids:
|
22 |
+
- tabular-multi-class-classification
|
23 |
+
- multi-class-classification
|
24 |
+
---
|
25 |
+
|
26 |
+
# Dataset Card for Medical Data with DiDs
|
27 |
+
|
28 |
+
## Table of Contents
|
29 |
+
- [Table of Contents](#table-of-contents)
|
30 |
+
- [Dataset Description](#dataset-description)
|
31 |
+
- [Dataset Summary](#dataset-summary)
|
32 |
+
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
|
33 |
+
- [Languages](#languages)
|
34 |
+
- [Dataset Structure](#dataset-structure)
|
35 |
+
- [Data Instances](#data-instances)
|
36 |
+
- [Data Fields](#data-fields)
|
37 |
+
- [Data Splits](#data-splits)
|
38 |
+
- [Dataset Creation](#dataset-creation)
|
39 |
+
- [Curation Rationale](#curation-rationale)
|
40 |
+
- [Source Data](#source-data)
|
41 |
+
- [Annotations](#annotations)
|
42 |
+
- [Personal and Sensitive Information](#personal-and-sensitive-information)
|
43 |
+
- [Considerations for Using the Data](#considerations-for-using-the-data)
|
44 |
+
- [Social Impact of Dataset](#social-impact-of-dataset)
|
45 |
+
- [Discussion of Biases](#discussion-of-biases)
|
46 |
+
- [Other Known Limitations](#other-known-limitations)
|
47 |
+
- [Additional Information](#additional-information)
|
48 |
+
- [Dataset Curators](#dataset-curators)
|
49 |
+
- [Licensing Information](#licensing-information)
|
50 |
+
- [Citation Information](#citation-information)
|
51 |
+
- [Contributions](#contributions)
|
52 |
+
|
53 |
+
## Dataset Description
|
54 |
+
Medical data with DiDs for differential privacy.
|
55 |
+
|
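The card above covers the derived medical data; a minimal sketch of pulling the source dataset from the Hub follows, using the dataset id recorded in data/raw/data_notes.md and the train split used elsewhere in this commit.

from datasets import load_dataset

# Load the source dataset referenced in data_notes.md (sketch)
ds = load_dataset("Ezi/medical_and_legislators_synthetic", split="train")
df = ds.to_pandas()
print(df.shape)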
data/raw/marketing_campaign.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
data/raw/medical_data_raw.ipynb
ADDED
@@ -0,0 +1,207 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"# PyTorch Differential Privacy Experiment"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "code",
|
12 |
+
"execution_count": 1,
|
13 |
+
"metadata": {},
|
14 |
+
"outputs": [
|
15 |
+
{
|
16 |
+
"name": "stdout",
|
17 |
+
"output_type": "stream",
|
18 |
+
"text": [
|
19 |
+
"Collecting torch\n",
|
20 |
+
" Downloading torch-2.4.0-cp311-none-macosx_11_0_arm64.whl.metadata (26 kB)\n",
|
21 |
+
"Collecting torchvision\n",
|
22 |
+
" Downloading torchvision-0.19.0-cp311-cp311-macosx_11_0_arm64.whl.metadata (6.0 kB)\n",
|
23 |
+
"Collecting opacus\n",
|
24 |
+
" Downloading opacus-1.5.2-py3-none-any.whl.metadata (7.9 kB)\n",
|
25 |
+
"Requirement already satisfied: numpy in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (2.1.0)\n",
|
26 |
+
"Requirement already satisfied: pandas in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (2.2.2)\n",
|
27 |
+
"Requirement already satisfied: filelock in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from torch) (3.15.4)\n",
|
28 |
+
"Requirement already satisfied: typing-extensions>=4.8.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from torch) (4.12.2)\n",
|
29 |
+
"Collecting sympy (from torch)\n",
|
30 |
+
" Downloading sympy-1.13.2-py3-none-any.whl.metadata (12 kB)\n",
|
31 |
+
"Collecting networkx (from torch)\n",
|
32 |
+
" Using cached networkx-3.3-py3-none-any.whl.metadata (5.1 kB)\n",
|
33 |
+
"Requirement already satisfied: jinja2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from torch) (3.1.4)\n",
|
34 |
+
"Requirement already satisfied: fsspec in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from torch) (2024.6.1)\n",
|
35 |
+
"Collecting pillow!=8.3.*,>=5.3.0 (from torchvision)\n",
|
36 |
+
" Downloading pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl.metadata (9.2 kB)\n",
|
37 |
+
"Collecting numpy\n",
|
38 |
+
" Using cached numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl.metadata (114 kB)\n",
|
39 |
+
"Collecting scipy>=1.2 (from opacus)\n",
|
40 |
+
" Downloading scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl.metadata (60 kB)\n",
|
41 |
+
"Collecting opt-einsum>=3.3.0 (from opacus)\n",
|
42 |
+
" Downloading opt_einsum-3.3.0-py3-none-any.whl.metadata (6.5 kB)\n",
|
43 |
+
"Requirement already satisfied: python-dateutil>=2.8.2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas) (2.9.0.post0)\n",
|
44 |
+
"Requirement already satisfied: pytz>=2020.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas) (2024.1)\n",
|
45 |
+
"Requirement already satisfied: tzdata>=2022.7 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas) (2024.1)\n",
|
46 |
+
"Requirement already satisfied: six>=1.5 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from python-dateutil>=2.8.2->pandas) (1.16.0)\n",
|
47 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from jinja2->torch) (2.1.5)\n",
|
48 |
+
"Collecting mpmath<1.4,>=1.1.0 (from sympy->torch)\n",
|
49 |
+
" Using cached mpmath-1.3.0-py3-none-any.whl.metadata (8.6 kB)\n",
|
50 |
+
"Downloading torch-2.4.0-cp311-none-macosx_11_0_arm64.whl (62.1 MB)\n",
|
51 |
+
"\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.1/62.1 MB\u001b[0m \u001b[31m440.8 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0mm eta \u001b[36m0:00:01\u001b[0m[36m0:00:04\u001b[0m\n",
|
52 |
+
"\u001b[?25hDownloading torchvision-0.19.0-cp311-cp311-macosx_11_0_arm64.whl (1.7 MB)\n",
|
53 |
+
"\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m560.7 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m1m557.0 kB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\n",
|
54 |
+
"\u001b[?25hDownloading opacus-1.5.2-py3-none-any.whl (239 kB)\n",
|
55 |
+
"Using cached numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl (14.0 MB)\n",
|
56 |
+
"Downloading opt_einsum-3.3.0-py3-none-any.whl (65 kB)\n",
|
57 |
+
"Downloading pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl (3.4 MB)\n",
|
58 |
+
"\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━��━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m457.2 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m kB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m:02\u001b[0m\n",
|
59 |
+
"\u001b[?25hDownloading scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl (23.1 MB)\n",
|
60 |
+
"\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m23.1/23.1 MB\u001b[0m \u001b[31m442.0 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0mm eta \u001b[36m0:00:01\u001b[0m[36m0:00:02\u001b[0m\n",
|
61 |
+
"\u001b[?25hUsing cached networkx-3.3-py3-none-any.whl (1.7 MB)\n",
|
62 |
+
"Downloading sympy-1.13.2-py3-none-any.whl (6.2 MB)\n",
|
63 |
+
"\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.2/6.2 MB\u001b[0m \u001b[31m462.5 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m1m487.3 kB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\n",
|
64 |
+
"\u001b[?25hUsing cached mpmath-1.3.0-py3-none-any.whl (536 kB)\n",
|
65 |
+
"Installing collected packages: mpmath, sympy, pillow, numpy, networkx, torch, scipy, opt-einsum, torchvision, opacus\n",
|
66 |
+
" Attempting uninstall: numpy\n",
|
67 |
+
" Found existing installation: numpy 2.1.0\n",
|
68 |
+
" Uninstalling numpy-2.1.0:\n",
|
69 |
+
" Successfully uninstalled numpy-2.1.0\n",
|
70 |
+
"Successfully installed mpmath-1.3.0 networkx-3.3 numpy-1.26.4 opacus-1.5.2 opt-einsum-3.3.0 pillow-10.4.0 scipy-1.14.1 sympy-1.13.2 torch-2.4.0 torchvision-0.19.0\n",
|
71 |
+
"Note: you may need to restart the kernel to use updated packages.\n",
|
72 |
+
"Requirement already satisfied: wandb in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (0.17.7)\n",
|
73 |
+
"Requirement already satisfied: datasets in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (2.21.0)\n",
|
74 |
+
"Requirement already satisfied: tqdm in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (4.66.5)\n",
|
75 |
+
"Requirement already satisfied: click!=8.0.0,>=7.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (8.1.7)\n",
|
76 |
+
"Requirement already satisfied: docker-pycreds>=0.4.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (0.4.0)\n",
|
77 |
+
"Requirement already satisfied: gitpython!=3.1.29,>=1.0.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (3.1.43)\n",
|
78 |
+
"Requirement already satisfied: platformdirs in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (4.2.2)\n",
|
79 |
+
"Requirement already satisfied: protobuf!=4.21.0,<6,>=3.19.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (5.27.3)\n",
|
80 |
+
"Requirement already satisfied: psutil>=5.0.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (6.0.0)\n",
|
81 |
+
"Requirement already satisfied: pyyaml in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (6.0.2)\n",
|
82 |
+
"Requirement already satisfied: requests<3,>=2.0.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (2.32.3)\n",
|
83 |
+
"Requirement already satisfied: sentry-sdk>=1.0.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (2.13.0)\n",
|
84 |
+
"Requirement already satisfied: setproctitle in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (1.3.3)\n",
|
85 |
+
"Requirement already satisfied: setuptools in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from wandb) (73.0.1)\n",
|
86 |
+
"Requirement already satisfied: filelock in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (3.15.4)\n",
|
87 |
+
"Requirement already satisfied: numpy>=1.17 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (1.26.4)\n",
|
88 |
+
"Requirement already satisfied: pyarrow>=15.0.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (17.0.0)\n",
|
89 |
+
"Requirement already satisfied: dill<0.3.9,>=0.3.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (0.3.8)\n",
|
90 |
+
"Requirement already satisfied: pandas in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (2.2.2)\n",
|
91 |
+
"Requirement already satisfied: xxhash in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (3.5.0)\n",
|
92 |
+
"Requirement already satisfied: multiprocess in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (0.70.16)\n",
|
93 |
+
"Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets) (2024.6.1)\n",
|
94 |
+
"Requirement already satisfied: aiohttp in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (3.10.5)\n",
|
95 |
+
"Requirement already satisfied: huggingface-hub>=0.21.2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (0.24.6)\n",
|
96 |
+
"Requirement already satisfied: packaging in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from datasets) (24.1)\n",
|
97 |
+
"Requirement already satisfied: six>=1.4.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from docker-pycreds>=0.4.0->wandb) (1.16.0)\n",
|
98 |
+
"Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (2.4.0)\n",
|
99 |
+
"Requirement already satisfied: aiosignal>=1.1.2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (1.3.1)\n",
|
100 |
+
"Requirement already satisfied: attrs>=17.3.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (24.2.0)\n",
|
101 |
+
"Requirement already satisfied: frozenlist>=1.1.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (1.4.1)\n",
|
102 |
+
"Requirement already satisfied: multidict<7.0,>=4.5 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (6.0.5)\n",
|
103 |
+
"Requirement already satisfied: yarl<2.0,>=1.0 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from aiohttp->datasets) (1.9.4)\n",
|
104 |
+
"Requirement already satisfied: gitdb<5,>=4.0.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from gitpython!=3.1.29,>=1.0.0->wandb) (4.0.11)\n",
|
105 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from huggingface-hub>=0.21.2->datasets) (4.12.2)\n",
|
106 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb) (3.3.2)\n",
|
107 |
+
"Requirement already satisfied: idna<4,>=2.5 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb) (3.7)\n",
|
108 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb) (2.2.2)\n",
|
109 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb) (2024.7.4)\n",
|
110 |
+
"Requirement already satisfied: python-dateutil>=2.8.2 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas->datasets) (2.9.0.post0)\n",
|
111 |
+
"Requirement already satisfied: pytz>=2020.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas->datasets) (2024.1)\n",
|
112 |
+
"Requirement already satisfied: tzdata>=2022.7 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from pandas->datasets) (2024.1)\n",
|
113 |
+
"Requirement already satisfied: smmap<6,>=3.0.1 in /Users/nullzero/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages (from gitdb<5,>=4.0.1->gitpython!=3.1.29,>=1.0.0->wandb) (5.0.1)\n",
|
114 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
115 |
+
]
|
116 |
+
}
|
117 |
+
],
|
118 |
+
"source": [
|
119 |
+
"%pip install -qqq torch torchvision opacus numpy pandas\n",
|
120 |
+
"%pip install -qqq wandb datasets tqdm\n"
|
121 |
+
]
|
122 |
+
},
|
123 |
+
{
|
124 |
+
"cell_type": "code",
|
125 |
+
"execution_count": 2,
|
126 |
+
"metadata": {},
|
127 |
+
"outputs": [],
|
128 |
+
"source": [
|
129 |
+
"import os\n",
|
130 |
+
"import torch \n",
|
131 |
+
"from dotenv import load_dotenv\n",
|
132 |
+
"import wandb \n",
|
133 |
+
"import logging\n",
|
134 |
+
"import shutil\n",
|
135 |
+
"import sys\n",
|
136 |
+
"from datetime import datetime, timedelta\n",
|
137 |
+
"\n",
|
138 |
+
"import argparse\n",
|
139 |
+
"from collections import Counter\n",
|
140 |
+
"from pathlib import Path\n",
|
141 |
+
"from statistics import mean\n",
|
142 |
+
"\n",
|
143 |
+
"import torch\n",
|
144 |
+
"import torch.nn as nn\n",
|
145 |
+
"from opacus import PrivacyEngine\n",
|
146 |
+
"from opacus.layers import DPGRU, DPLSTM, DPRNN\n",
|
147 |
+
"from torch.nn.utils.rnn import pad_sequence\n",
|
148 |
+
"from torch.utils.data import DataLoader, Dataset\n",
|
149 |
+
"from tqdm import tqdm, tqdm_notebook"
|
150 |
+
]
|
151 |
+
},
|
152 |
+
{
|
153 |
+
"cell_type": "code",
|
154 |
+
"execution_count": 3,
|
155 |
+
"metadata": {},
|
156 |
+
"outputs": [],
|
157 |
+
"source": [
|
158 |
+
"device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n",
|
159 |
+
"if os.path.exists('.env'):\n",
|
160 |
+
" load_dotenv('.env')\n"
|
161 |
+
]
|
162 |
+
},
|
163 |
+
{
|
164 |
+
"cell_type": "code",
|
165 |
+
"execution_count": null,
|
166 |
+
"metadata": {},
|
167 |
+
"outputs": [],
|
168 |
+
"source": [
|
169 |
+
"logging.basicConfig(\n",
|
170 |
+
" format=\"%(asctime)s:%(levelname)s:%(message)s\",\n",
|
171 |
+
" datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
|
172 |
+
" stream=sys.stdout,\n",
|
173 |
+
")\n",
|
174 |
+
"logger = logging.getLogger(\"ddp\")\n",
|
175 |
+
"logger.setLevel(level=logging.INFO)\n"
|
176 |
+
]
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"cell_type": "code",
|
180 |
+
"execution_count": null,
|
181 |
+
"metadata": {},
|
182 |
+
"outputs": [],
|
183 |
+
"source": [
|
184 |
+
"wandb.login(key=os.getenv('WANDB_API_KEY'))\n",
|
185 |
+
"wandb.init(project=\"verida-pii\", name=\"verida_data_raw\")"
|
186 |
+
]
|
187 |
+
},
|
188 |
+
{
|
189 |
+
"cell_type": "code",
|
190 |
+
"execution_count": null,
|
191 |
+
"metadata": {},
|
192 |
+
"outputs": [],
|
193 |
+
"source": [
|
194 |
+
"data_name = 'Ezi/medical_and_legislators_synthetic'"
|
195 |
+
]
|
196 |
+
}
|
197 |
+
],
|
198 |
+
"metadata": {
|
199 |
+
"kernelspec": {
|
200 |
+
"display_name": "Python 3 (ipykernel)",
|
201 |
+
"language": "python",
|
202 |
+
"name": "python3"
|
203 |
+
}
|
204 |
+
},
|
205 |
+
"nbformat": 4,
|
206 |
+
"nbformat_minor": 2
|
207 |
+
}
|
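The notebook above stops after imports, logging, and wandb setup; a minimal sketch of how Opacus's PrivacyEngine wraps a model, optimizer, and loader for DP-SGD follows, assuming the opacus 1.x make_private API and a toy linear model (none of the data or model below is from the commit).

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine

# Toy data and model (assumed for illustration)
X = torch.randn(64, 10)
y = torch.randint(0, 2, (64,))
loader = DataLoader(TensorDataset(X, y), batch_size=16)
model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()

privacy_engine = PrivacyEngine()
model, optimizer, loader = privacy_engine.make_private(
    module=model,
    optimizer=optimizer,
    data_loader=loader,
    noise_multiplier=1.0,  # Gaussian noise scale
    max_grad_norm=1.0,     # per-sample gradient clipping bound
)

for xb, yb in loader:      # one DP-SGD pass over the toy data
    optimizer.zero_grad()
    loss = criterion(model(xb), yb)
    loss.backward()
    optimizer.step()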
data/raw/raw_data.ipynb
ADDED
@@ -0,0 +1,662 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"# Data Preparation "
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "code",
|
12 |
+
"execution_count": 44,
|
13 |
+
"metadata": {},
|
14 |
+
"outputs": [],
|
15 |
+
"source": [
|
16 |
+
"!poetry add -qqq python-dotenv datasets wandb didkit\n"
|
17 |
+
]
|
18 |
+
},
|
19 |
+
{
|
20 |
+
"cell_type": "code",
|
21 |
+
"execution_count": null,
|
22 |
+
"metadata": {},
|
23 |
+
"outputs": [],
|
24 |
+
"source": []
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"cell_type": "code",
|
28 |
+
"execution_count": 34,
|
29 |
+
"metadata": {},
|
30 |
+
"outputs": [],
|
31 |
+
"source": [
|
32 |
+
"import os\n",
|
33 |
+
"from dotenv import load_dotenv, find_dotenv\n",
|
34 |
+
"if os.path.exists('../env'):\n",
|
35 |
+
" load_dotenv(find_dotenv())\n",
|
36 |
+
"import wandb"
|
37 |
+
]
|
38 |
+
},
|
39 |
+
{
|
40 |
+
"cell_type": "code",
|
41 |
+
"execution_count": 36,
|
42 |
+
"metadata": {},
|
43 |
+
"outputs": [
|
44 |
+
{
|
45 |
+
"name": "stderr",
|
46 |
+
"output_type": "stream",
|
47 |
+
"text": [
|
48 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m Calling wandb.login() after wandb.init() has no effect.\n"
|
49 |
+
]
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"data": {
|
53 |
+
"text/html": [
|
54 |
+
"Finishing last run (ID:pnvhnkh8) before initializing another..."
|
55 |
+
],
|
56 |
+
"text/plain": [
|
57 |
+
"<IPython.core.display.HTML object>"
|
58 |
+
]
|
59 |
+
},
|
60 |
+
"metadata": {},
|
61 |
+
"output_type": "display_data"
|
62 |
+
},
|
63 |
+
{
|
64 |
+
"data": {
|
65 |
+
"application/vnd.jupyter.widget-view+json": {
|
66 |
+
"model_id": "19afd929911c42f6ab4e9604948ade90",
|
67 |
+
"version_major": 2,
|
68 |
+
"version_minor": 0
|
69 |
+
},
|
70 |
+
"text/plain": [
|
71 |
+
"VBox(children=(Label(value='0.001 MB of 0.001 MB uploaded\\r'), FloatProgress(value=1.0, max=1.0)))"
|
72 |
+
]
|
73 |
+
},
|
74 |
+
"metadata": {},
|
75 |
+
"output_type": "display_data"
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"data": {
|
79 |
+
"text/html": [
|
80 |
+
" View run <strong style=\"color:#cdcd00\">verida_data_raw</strong> at: <a href='https://wandb.ai/orion-agents/verida-pii/runs/pnvhnkh8' target=\"_blank\">https://wandb.ai/orion-agents/verida-pii/runs/pnvhnkh8</a><br/> View project at: <a href='https://wandb.ai/orion-agents/verida-pii' target=\"_blank\">https://wandb.ai/orion-agents/verida-pii</a><br/>Synced 5 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)"
|
81 |
+
],
|
82 |
+
"text/plain": [
|
83 |
+
"<IPython.core.display.HTML object>"
|
84 |
+
]
|
85 |
+
},
|
86 |
+
"metadata": {},
|
87 |
+
"output_type": "display_data"
|
88 |
+
},
|
89 |
+
{
|
90 |
+
"data": {
|
91 |
+
"text/html": [
|
92 |
+
"Find logs at: <code>./wandb/run-20240825_035519-pnvhnkh8/logs</code>"
|
93 |
+
],
|
94 |
+
"text/plain": [
|
95 |
+
"<IPython.core.display.HTML object>"
|
96 |
+
]
|
97 |
+
},
|
98 |
+
"metadata": {},
|
99 |
+
"output_type": "display_data"
|
100 |
+
},
|
101 |
+
{
|
102 |
+
"data": {
|
103 |
+
"text/html": [
|
104 |
+
"The new W&B backend becomes opt-out in version 0.18.0; try it out with `wandb.require(\"core\")`! See https://wandb.me/wandb-core for more information."
|
105 |
+
],
|
106 |
+
"text/plain": [
|
107 |
+
"<IPython.core.display.HTML object>"
|
108 |
+
]
|
109 |
+
},
|
110 |
+
"metadata": {},
|
111 |
+
"output_type": "display_data"
|
112 |
+
},
|
113 |
+
{
|
114 |
+
"data": {
|
115 |
+
"text/html": [
|
116 |
+
"Successfully finished last run (ID:pnvhnkh8). Initializing new run:<br/>"
|
117 |
+
],
|
118 |
+
"text/plain": [
|
119 |
+
"<IPython.core.display.HTML object>"
|
120 |
+
]
|
121 |
+
},
|
122 |
+
"metadata": {},
|
123 |
+
"output_type": "display_data"
|
124 |
+
},
|
125 |
+
{
|
126 |
+
"data": {
|
127 |
+
"application/vnd.jupyter.widget-view+json": {
|
128 |
+
"model_id": "76495a6b8c1843b09162b9ec31d99dfe",
|
129 |
+
"version_major": 2,
|
130 |
+
"version_minor": 0
|
131 |
+
},
|
132 |
+
"text/plain": [
|
133 |
+
"VBox(children=(Label(value='Waiting for wandb.init()...\\r'), FloatProgress(value=0.01112003148947325, max=1.0)…"
|
134 |
+
]
|
135 |
+
},
|
136 |
+
"metadata": {},
|
137 |
+
"output_type": "display_data"
|
138 |
+
},
|
139 |
+
{
|
140 |
+
"data": {
|
141 |
+
"text/html": [
|
142 |
+
"Tracking run with wandb version 0.17.7"
|
143 |
+
],
|
144 |
+
"text/plain": [
|
145 |
+
"<IPython.core.display.HTML object>"
|
146 |
+
]
|
147 |
+
},
|
148 |
+
"metadata": {},
|
149 |
+
"output_type": "display_data"
|
150 |
+
},
|
151 |
+
{
|
152 |
+
"data": {
|
153 |
+
"text/html": [
|
154 |
+
"Run data is saved locally in <code>/Users/nullzero/Documents/repos/github.com/privacy-identity/vda-simulation-medical/vda-sim-medical/wandb/run-20240825_035604-69f6mbdr</code>"
|
155 |
+
],
|
156 |
+
"text/plain": [
|
157 |
+
"<IPython.core.display.HTML object>"
|
158 |
+
]
|
159 |
+
},
|
160 |
+
"metadata": {},
|
161 |
+
"output_type": "display_data"
|
162 |
+
},
|
163 |
+
{
|
164 |
+
"data": {
|
165 |
+
"text/html": [
|
166 |
+
"Syncing run <strong><a href='https://wandb.ai/orion-agents/verida-pii/runs/69f6mbdr' target=\"_blank\">verida_data_raw</a></strong> to <a href='https://wandb.ai/orion-agents/verida-pii' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
|
167 |
+
],
|
168 |
+
"text/plain": [
|
169 |
+
"<IPython.core.display.HTML object>"
|
170 |
+
]
|
171 |
+
},
|
172 |
+
"metadata": {},
|
173 |
+
"output_type": "display_data"
|
174 |
+
},
|
175 |
+
{
|
176 |
+
"data": {
|
177 |
+
"text/html": [
|
178 |
+
" View project at <a href='https://wandb.ai/orion-agents/verida-pii' target=\"_blank\">https://wandb.ai/orion-agents/verida-pii</a>"
|
179 |
+
],
|
180 |
+
"text/plain": [
|
181 |
+
"<IPython.core.display.HTML object>"
|
182 |
+
]
|
183 |
+
},
|
184 |
+
"metadata": {},
|
185 |
+
"output_type": "display_data"
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"data": {
|
189 |
+
"text/html": [
|
190 |
+
" View run at <a href='https://wandb.ai/orion-agents/verida-pii/runs/69f6mbdr' target=\"_blank\">https://wandb.ai/orion-agents/verida-pii/runs/69f6mbdr</a>"
|
191 |
+
],
|
192 |
+
"text/plain": [
|
193 |
+
"<IPython.core.display.HTML object>"
|
194 |
+
]
|
195 |
+
},
|
196 |
+
"metadata": {},
|
197 |
+
"output_type": "display_data"
|
198 |
+
},
|
199 |
+
{
|
200 |
+
"name": "stderr",
|
201 |
+
"output_type": "stream",
|
202 |
+
"text": [
|
203 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 1:13:41.968146, resuming normal operation.\n",
|
204 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 0:42:49.841123, resuming normal operation.\n",
|
205 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: Network error resolved after 0:18:54.049113, resuming normal operation.\n"
|
206 |
+
]
|
207 |
+
}
|
208 |
+
],
|
209 |
+
"source": [
|
210 |
+
"wandb.login(key=os.getenv('WANDB_API_KEY'))\n",
|
211 |
+
"run = wandb.init(project=\"verida-pii\", name=\"verida_data_raw\")"
|
212 |
+
]
|
213 |
+
},
|
214 |
+
{
|
215 |
+
"cell_type": "code",
|
216 |
+
"execution_count": 62,
|
217 |
+
"metadata": {},
|
218 |
+
"outputs": [
|
219 |
+
{
|
220 |
+
"name": "stdout",
|
221 |
+
"output_type": "stream",
|
222 |
+
"text": [
|
223 |
+
"539\n"
|
224 |
+
]
|
225 |
+
}
|
226 |
+
],
|
227 |
+
"source": [
|
228 |
+
"from datasets import load_dataset\n",
|
229 |
+
"import pandas as pd\n",
|
230 |
+
"data_name=\"Ezi/medical_and_legislators_synthetic\"\n",
|
231 |
+
"data = load_dataset(path=data_name, split='train')\n",
|
232 |
+
"data_df = data.to_pandas()\n",
|
233 |
+
"data_df.head()\n",
|
234 |
+
"print(len(data_df))\n",
|
235 |
+
"\n"
|
236 |
+
]
|
237 |
+
},
|
238 |
+
{
|
239 |
+
"cell_type": "code",
|
240 |
+
"execution_count": 30,
|
241 |
+
"metadata": {},
|
242 |
+
"outputs": [
|
243 |
+
{
|
244 |
+
"data": {
|
245 |
+
"text/plain": [
|
246 |
+
"'mps'"
|
247 |
+
]
|
248 |
+
},
|
249 |
+
"execution_count": 30,
|
250 |
+
"metadata": {},
|
251 |
+
"output_type": "execute_result"
|
252 |
+
}
|
253 |
+
],
|
254 |
+
"source": [
|
255 |
+
"device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n",
|
256 |
+
"device"
|
257 |
+
]
|
258 |
+
},
|
259 |
+
{
|
260 |
+
"cell_type": "code",
|
261 |
+
"execution_count": 63,
|
262 |
+
"metadata": {},
|
263 |
+
"outputs": [],
|
264 |
+
"source": [
|
265 |
+
"# DiD Generator\n",
|
266 |
+
"import didkit\n",
|
267 |
+
"\n",
|
268 |
+
"def generate_did():\n",
|
269 |
+
" key = didkit.generate_ed25519_key()\n",
|
270 |
+
" did = didkit.key_to_did(\"key\", key)\n",
|
271 |
+
" return did, key"
|
272 |
+
]
|
273 |
+
},
|
274 |
+
{
|
275 |
+
"cell_type": "code",
|
276 |
+
"execution_count": 64,
|
277 |
+
"metadata": {},
|
278 |
+
"outputs": [],
|
279 |
+
"source": [
|
280 |
+
"from tqdm import tqdm, tqdm_notebook, tqdm_pandas\n",
|
281 |
+
"import pandas as pd"
|
282 |
+
]
|
283 |
+
},
|
284 |
+
{
|
285 |
+
"cell_type": "code",
|
286 |
+
"execution_count": 70,
|
287 |
+
"metadata": {},
|
288 |
+
"outputs": [
|
289 |
+
{
|
290 |
+
"data": {
|
291 |
+
"text/plain": [
|
292 |
+
"Index(['last_name', 'first_name', 'middle_name', 'suffix', 'nickname',\n",
|
293 |
+
" 'full_name', 'birthday', 'gender', 'type', 'state', 'district',\n",
|
294 |
+
" 'senate_class', 'party', 'url', 'address', 'phone', 'contact_form',\n",
|
295 |
+
" 'rss_url', 'twitter', 'facebook', 'youtube', 'youtube_id',\n",
|
296 |
+
" 'bioguide_id', 'thomas_id', 'opensecrets_id', 'lis_id', 'fec_ids',\n",
|
297 |
+
" 'cspan_id', 'govtrack_id', 'votesmart_id', 'ballotpedia_id',\n",
|
298 |
+
" 'washington_post_id', 'icpsr_id', 'wikipedia_id', 'last_name.1',\n",
|
299 |
+
" 'first_name.1', 'middle_name.1', 'suffix.1', 'nickname.1',\n",
|
300 |
+
" 'full_name.1', 'birthday.1', 'gender.1', 'type.1', 'state.1',\n",
|
301 |
+
" 'district.1', 'senate_class.1', 'party.1', 'url.1', 'address.1',\n",
|
302 |
+
" 'phone.1', 'contact_form.1', 'rss_url.1', 'twitter.1', 'facebook.1',\n",
|
303 |
+
" 'youtube.1', 'youtube_id.1', 'bioguide_id.1', 'thomas_id.1',\n",
|
304 |
+
" 'opensecrets_id.1', 'lis_id.1', 'fec_ids.1', 'cspan_id.1',\n",
|
305 |
+
" 'govtrack_id.1', 'votesmart_id.1', 'ballotpedia_id.1',\n",
|
306 |
+
" 'washington_post_id.1', 'icpsr_id.1', 'wikipedia_id.1'],\n",
|
307 |
+
" dtype='object')"
|
308 |
+
]
|
309 |
+
},
|
310 |
+
"execution_count": 70,
|
311 |
+
"metadata": {},
|
312 |
+
"output_type": "execute_result"
|
313 |
+
}
|
314 |
+
],
|
315 |
+
"source": [
|
316 |
+
"#data_df['did'] = data_df.apply(lambda x: generate_did()[0], axis=1)\n",
|
317 |
+
"#data_df['key'] = data_df.apply(lambda x: generate_did()[1], axis=1)\n",
|
318 |
+
"cleaned_df = data_df.copy()\n",
|
319 |
+
"cleaned_df.head()\n",
|
320 |
+
"cleaned_df.isna().sum()\n",
|
321 |
+
"cleaned_df.isna().dropna()\n",
|
322 |
+
"cleaned_df.describe()\n",
|
323 |
+
"cleaned_df.shape\n",
|
324 |
+
"cleaned_df.columns"
|
325 |
+
]
|
326 |
+
},
|
327 |
+
{
|
328 |
+
"cell_type": "code",
|
329 |
+
"execution_count": 71,
|
330 |
+
"metadata": {},
|
331 |
+
"outputs": [],
|
332 |
+
"source": [
|
333 |
+
"data_did = data_df.copy()\n",
|
334 |
+
"data_did.to_csv(\"data_did.csv\")\n",
|
335 |
+
"data_did"
|
336 |
+
]
|
337 |
+
},
|
338 |
+
{
|
339 |
+
"cell_type": "code",
|
340 |
+
"execution_count": 76,
|
341 |
+
"metadata": {},
|
342 |
+
"outputs": [
|
343 |
+
{
|
344 |
+
"ename": "ModuleNotFoundError",
|
345 |
+
"evalue": "No module named 'DatasetDict'",
|
346 |
+
"output_type": "error",
|
347 |
+
"traceback": [
|
348 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
349 |
+
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
|
350 |
+
"Cell \u001b[0;32mIn[76], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Back to dataset\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mDatasetDict\u001b[39;00m \n\u001b[1;32m 3\u001b[0m secure_mode \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 4\u001b[0m train_split \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0.8\u001b[39m\n",
|
351 |
+
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'DatasetDict'"
|
352 |
+
]
|
353 |
+
}
|
354 |
+
],
|
355 |
+
"source": [
|
356 |
+
"# Back to dataset\n",
|
357 |
+
"\n",
|
358 |
+
"secure_mode = False\n",
|
359 |
+
"train_split = 0.8\n",
|
360 |
+
"test_every = 5\n",
|
361 |
+
"batch_size = 800\n",
|
362 |
+
"\n",
|
363 |
+
"ds = data_did\n",
|
364 |
+
"train_len = int(train_split * len(ds))\n",
|
365 |
+
"test_len = len(ds) - train_len\n",
|
366 |
+
"\n",
|
367 |
+
"print(f\"{train_len} samples for training, {test_len} for testing\")\n",
|
368 |
+
"\n",
|
369 |
+
"train_ds, test_ds = torch.utils.data.random_split(ds, [train_len, test_len])\n"
|
370 |
+
]
|
371 |
+
},
|
372 |
+
{
|
373 |
+
"cell_type": "code",
|
374 |
+
"execution_count": 78,
|
375 |
+
"metadata": {},
|
376 |
+
"outputs": [
|
377 |
+
{
|
378 |
+
"ename": "TypeError",
|
379 |
+
"evalue": "expected str, bytes or os.PathLike object, not Subset",
|
380 |
+
"output_type": "error",
|
381 |
+
"traceback": [
|
382 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
383 |
+
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
|
384 |
+
"Cell \u001b[0;32mIn[78], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdatasets\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_dataset\n\u001b[0;32m----> 2\u001b[0m ds \u001b[38;5;241m=\u001b[39m \u001b[43mload_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_ds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest_ds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msplit\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtest\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n",
|
385 |
+
"File \u001b[0;32m~/Library/Caches/pypoetry/virtualenvs/verida-differential-privacy-OB45ac0m-py3.11/lib/python3.11/site-packages/datasets/load.py:2588\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)\u001b[0m\n\u001b[1;32m 2586\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m data_files \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data_files:\n\u001b[1;32m 2587\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEmpty \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mdata_files\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m: \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdata_files\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m. It should be either non-empty or None (default).\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m-> 2588\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mPath\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mDATASET_STATE_JSON_FILENAME\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mexists():\n\u001b[1;32m 2589\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 2590\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are trying to load a dataset that was saved using `save_to_disk`. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 2591\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mPlease use `load_from_disk` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 2592\u001b[0m )\n\u001b[1;32m 2594\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m streaming \u001b[38;5;129;01mand\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
386 |
+
"File \u001b[0;32m/opt/homebrew/Cellar/[email protected]/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:871\u001b[0m, in \u001b[0;36mPath.__new__\u001b[0;34m(cls, *args, **kwargs)\u001b[0m\n\u001b[1;32m 869\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m Path:\n\u001b[1;32m 870\u001b[0m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;241m=\u001b[39m WindowsPath \u001b[38;5;28;01mif\u001b[39;00m os\u001b[38;5;241m.\u001b[39mname \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mnt\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m PosixPath\n\u001b[0;32m--> 871\u001b[0m \u001b[38;5;28mself\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_from_parts\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 872\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_flavour\u001b[38;5;241m.\u001b[39mis_supported:\n\u001b[1;32m 873\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcannot instantiate \u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m on your system\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 874\u001b[0m \u001b[38;5;241m%\u001b[39m (\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m,))\n",
|
387 |
+
"File \u001b[0;32m/opt/homebrew/Cellar/[email protected]/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:509\u001b[0m, in \u001b[0;36mPurePath._from_parts\u001b[0;34m(cls, args)\u001b[0m\n\u001b[1;32m 504\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 505\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_from_parts\u001b[39m(\u001b[38;5;28mcls\u001b[39m, args):\n\u001b[1;32m 506\u001b[0m \u001b[38;5;66;03m# We need to call _parse_args on the instance, so as to get the\u001b[39;00m\n\u001b[1;32m 507\u001b[0m \u001b[38;5;66;03m# right flavour.\u001b[39;00m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;28mself\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mobject\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__new__\u001b[39m(\u001b[38;5;28mcls\u001b[39m)\n\u001b[0;32m--> 509\u001b[0m drv, root, parts \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_args\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 510\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_drv \u001b[38;5;241m=\u001b[39m drv\n\u001b[1;32m 511\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_root \u001b[38;5;241m=\u001b[39m root\n",
|
388 |
+
"File \u001b[0;32m/opt/homebrew/Cellar/[email protected]/3.11.9_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/pathlib.py:493\u001b[0m, in \u001b[0;36mPurePath._parse_args\u001b[0;34m(cls, args)\u001b[0m\n\u001b[1;32m 491\u001b[0m parts \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m a\u001b[38;5;241m.\u001b[39m_parts\n\u001b[1;32m 492\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 493\u001b[0m a \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mfspath(a)\n\u001b[1;32m 494\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(a, \u001b[38;5;28mstr\u001b[39m):\n\u001b[1;32m 495\u001b[0m \u001b[38;5;66;03m# Force-cast str subclasses to str (issue #21127)\u001b[39;00m\n\u001b[1;32m 496\u001b[0m parts\u001b[38;5;241m.\u001b[39mappend(\u001b[38;5;28mstr\u001b[39m(a))\n",
|
389 |
+
"\u001b[0;31mTypeError\u001b[0m: expected str, bytes or os.PathLike object, not Subset"
|
390 |
+
]
|
391 |
+
}
|
392 |
+
],
|
393 |
+
"source": [
|
394 |
+
"from datasets import load_dataset\n",
|
395 |
+
"ds = load_dataset(train_ds, test_ds, split=[\"train\", \"test\"])\n"
|
396 |
+
]
|
397 |
+
},
|
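The cell above fails because `load_dataset` resolves its first argument as a dataset name or path; `Dataset`/`Subset` objects cannot be passed back in. A minimal sketch of the two loading routes (the repo id is the one used later in this notebook; the local path is hypothetical):

```python
from datasets import load_dataset, load_from_disk

# Route 1: load by Hub repo id or local script/path
ds = load_dataset("synavate/medical_records_did")

# Route 2: datasets written with save_to_disk() must be reopened
# with load_from_disk(), as the ValueError branch in the traceback points out
ds_local = load_from_disk("data/processed/medical_records")  # hypothetical path
```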
398 |
+
{
|
399 |
+
"cell_type": "code",
|
400 |
+
"execution_count": 79,
|
401 |
+
"metadata": {},
|
402 |
+
"outputs": [],
|
403 |
+
"source": [
|
404 |
+
"import torch\n",
|
405 |
+
"import torch.nn as nn\n",
|
406 |
+
"\n",
|
407 |
+
"class CharByteEncoder(nn.Module):\n",
|
408 |
+
" \"\"\"\n",
|
409 |
+
" This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also\n",
|
410 |
+
" perform the opposite operation to check a result.\n",
|
411 |
+
" Examples:\n",
|
412 |
+
" >>> encoder = CharByteEncoder()\n",
|
413 |
+
" >>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])\n",
|
414 |
+
" >>> encoder.decode(t) # returns \"<s>Ślusàrski</s>\"\n",
|
415 |
+
" \"\"\"\n",
|
416 |
+
"\n",
|
417 |
+
" def __init__(self):\n",
|
418 |
+
" super().__init__()\n",
|
419 |
+
" self.start_token = \"<s>\"\n",
|
420 |
+
" self.end_token = \"</s>\"\n",
|
421 |
+
" self.pad_token = \"<pad>\"\n",
|
422 |
+
"\n",
|
423 |
+
" self.start_idx = 256\n",
|
424 |
+
" self.end_idx = 257\n",
|
425 |
+
" self.pad_idx = 258\n",
|
426 |
+
"\n",
|
427 |
+
" def forward(self, s: str, pad_to=0) -> torch.LongTensor:\n",
|
428 |
+
" \"\"\"\n",
|
429 |
+
" Encodes a string. It will append a start token <s> (id=self.start_idx) and an end token </s>\n",
|
430 |
+
" (id=self.end_idx).\n",
|
431 |
+
" Args:\n",
|
432 |
+
" s: The string to encode.\n",
|
433 |
+
" pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.\n",
|
434 |
+
" Defaults to 0.\n",
|
435 |
+
" Returns:\n",
|
436 |
+
" The encoded LongTensor of indices.\n",
|
437 |
+
" \"\"\"\n",
|
438 |
+
" encoded = s.encode()\n",
|
439 |
+
" n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0\n",
|
440 |
+
" return torch.LongTensor(\n",
|
441 |
+
" [self.start_idx]\n",
|
442 |
+
" + [c for c in encoded] # noqa\n",
|
443 |
+
" + [self.end_idx]\n",
|
444 |
+
" + [self.pad_idx for _ in range(n_pad)]\n",
|
445 |
+
" )\n",
|
446 |
+
"\n",
|
447 |
+
" def decode(self, char_ids_tensor: torch.LongTensor) -> str:\n",
|
448 |
+
" \"\"\"\n",
|
449 |
+
" The inverse of `forward`. Keeps the start, end, and pad indices.\n",
|
450 |
+
" \"\"\"\n",
|
451 |
+
" char_ids = char_ids_tensor.cpu().detach().tolist()\n",
|
452 |
+
"\n",
|
453 |
+
" out = []\n",
|
454 |
+
" buf = []\n",
|
455 |
+
" for c in char_ids:\n",
|
456 |
+
" if c < 256:\n",
|
457 |
+
" buf.append(c)\n",
|
458 |
+
" else:\n",
|
459 |
+
" if buf:\n",
|
460 |
+
" out.append(bytes(buf).decode())\n",
|
461 |
+
" buf = []\n",
|
462 |
+
" if c == self.start_idx:\n",
|
463 |
+
" out.append(self.start_token)\n",
|
464 |
+
" elif c == self.end_idx:\n",
|
465 |
+
" out.append(self.end_token)\n",
|
466 |
+
" elif c == self.pad_idx:\n",
|
467 |
+
" out.append(self.pad_token)\n",
|
468 |
+
"\n",
|
469 |
+
" if buf: # in case some are left\n",
|
470 |
+
" out.append(bytes(buf).decode())\n",
|
471 |
+
" return \"\".join(out)\n",
|
472 |
+
"\n",
|
473 |
+
" def __len__(self):\n",
|
474 |
+
" \"\"\"\n",
|
475 |
+
" The length of our encoder space. This is fixed to 256 (one byte) + 3 special chars\n",
|
476 |
+
" (start, end, pad).\n",
|
477 |
+
" Returns:\n",
|
478 |
+
" 259\n",
|
479 |
+
" \"\"\"\n",
|
480 |
+
" return 259"
|
481 |
+
]
|
482 |
+
},
|
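A quick round-trip check of the encoder above, matching the docstring's example (a sketch; run after the cell defining `CharByteEncoder`):

```python
encoder = CharByteEncoder()
ids = encoder("Ślusàrski")     # 11 UTF-8 bytes framed by <s> (256) and </s> (257)
print(ids.shape)               # torch.Size([13])
print(encoder.decode(ids))     # "<s>Ślusàrski</s>"
print(len(encoder))            # 259
```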
483 |
+
{
|
484 |
+
"cell_type": "code",
|
485 |
+
"execution_count": 80,
|
486 |
+
"metadata": {},
|
487 |
+
"outputs": [],
|
488 |
+
"source": [
|
489 |
+
"from torch.nn.utils.rnn import pad_sequence\n",
|
490 |
+
"\n",
|
491 |
+
"def padded_collate(batch, padding_idx=0):\n",
|
492 |
+
" x = pad_sequence(\n",
|
493 |
+
" [elem[0] for elem in batch], batch_first=True, padding_value=padding_idx\n",
|
494 |
+
" )\n",
|
495 |
+
" y = torch.stack([elem[1] for elem in batch]).long()\n",
|
496 |
+
"\n",
|
497 |
+
" return x, y"
|
498 |
+
]
|
499 |
+
},
|
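What `padded_collate` produces for two names of different lengths (note the default `padding_idx=0` here, not the encoder's `pad_idx` of 258):

```python
enc = CharByteEncoder()
batch = [
    (enc("Kim"), torch.tensor(0)),       # 3 bytes + <s>/</s> -> length 5
    (enc("Nakamura"), torch.tensor(1)),  # 8 bytes + <s>/</s> -> length 10
]
x, y = padded_collate(batch)
print(x.shape)  # torch.Size([2, 10]) -- the shorter name is padded with zeros
print(y)        # tensor([0, 1])
```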
500 |
+
{
|
501 |
+
"cell_type": "code",
|
502 |
+
"execution_count": 74,
|
503 |
+
"metadata": {},
|
504 |
+
"outputs": [],
|
505 |
+
"source": [
|
506 |
+
"from torch.utils.data import Dataset\n",
|
507 |
+
"from pathlib import Path\n",
|
508 |
+
"\n",
|
509 |
+
"\n",
|
510 |
+
"class NamesDataset(Dataset):\n",
|
511 |
+
" def __init__(self, root):\n",
|
512 |
+
" self.root = Path(root)\n",
|
513 |
+
"\n",
|
514 |
+
" self.labels = list({langfile.stem for langfile in self.root.iterdir()})\n",
|
515 |
+
" self.labels_dict = {label: i for i, label in enumerate(self.labels)}\n",
|
516 |
+
" self.encoder = CharByteEncoder()\n",
|
517 |
+
" self.samples = self.construct_samples()\n",
|
518 |
+
"\n",
|
519 |
+
" def __getitem__(self, i):\n",
|
520 |
+
" return self.samples[i]\n",
|
521 |
+
"\n",
|
522 |
+
" def __len__(self):\n",
|
523 |
+
" return len(self.samples)\n",
|
524 |
+
"\n",
|
525 |
+
" def construct_samples(self):\n",
|
526 |
+
" samples = []\n",
|
527 |
+
" for langfile in self.root.iterdir():\n",
|
528 |
+
" label_name = langfile.stem\n",
|
529 |
+
" label_id = self.labels_dict[label_name]\n",
|
530 |
+
" with open(langfile, \"r\") as fin:\n",
|
531 |
+
" for row in fin:\n",
|
532 |
+
" samples.append(\n",
|
533 |
+
" (self.encoder(row.strip()), torch.tensor(label_id).long())\n",
|
534 |
+
" )\n",
|
535 |
+
" return samples\n",
|
536 |
+
"\n",
|
537 |
+
" def label_count(self):\n",
|
538 |
+
" cnt = Counter()\n",
|
539 |
+
" for _x, y in self.samples:\n",
|
540 |
+
" label = self.labels[int(y)]\n",
|
541 |
+
" cnt[label] += 1\n",
|
542 |
+
" return cnt\n",
|
543 |
+
"\n",
|
544 |
+
"\n",
|
545 |
+
"VOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters."
|
546 |
+
]
|
547 |
+
},
|
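Instantiating the dataset assumes a directory with one `<language>.txt` file per class and one name per line (the layout of the classic PyTorch names dataset); the path below is a placeholder:

```python
dataset = NamesDataset("data/names")  # placeholder path
print(len(dataset), "samples across", len(dataset.labels), "languages")
print(dataset.label_count().most_common(3))
```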
548 |
+
{
|
549 |
+
"cell_type": "code",
|
550 |
+
"execution_count": 81,
|
551 |
+
"metadata": {},
|
552 |
+
"outputs": [],
|
553 |
+
"source": [
|
554 |
+
"# Data Loaders\n",
|
555 |
+
"from torch.utils.data import DataLoader\n",
|
556 |
+
"\n",
|
557 |
+
"train_loader = DataLoader(\n",
|
558 |
+
" train_ds,\n",
|
559 |
+
" batch_size=batch_size,\n",
|
560 |
+
" pin_memory=True,\n",
|
561 |
+
" collate_fn=padded_collate,\n",
|
562 |
+
")\n",
|
563 |
+
"\n",
|
564 |
+
"test_loader = DataLoader(\n",
|
565 |
+
" test_ds,\n",
|
566 |
+
" batch_size=2 * batch_size,\n",
|
567 |
+
" shuffle=False,\n",
|
568 |
+
" pin_memory=True,\n",
|
569 |
+
" collate_fn=padded_collate,\n",
|
570 |
+
")"
|
571 |
+
]
|
572 |
+
},
|
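Because `padded_collate` defaults to `padding_idx=0` while the encoder reserves 258 for `<pad>`, the pad id can be passed explicitly if the model should treat padding as its own symbol; a sketch:

```python
from functools import partial

# assumes train_ds yields (encoded_name, label) pairs as NamesDataset does
collate = partial(padded_collate, padding_idx=CharByteEncoder().pad_idx)  # 258
train_loader = DataLoader(
    train_ds, batch_size=batch_size, pin_memory=True, collate_fn=collate
)
```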
573 |
+
{
|
574 |
+
"cell_type": "code",
|
575 |
+
"execution_count": 85,
|
576 |
+
"metadata": {},
|
577 |
+
"outputs": [],
|
578 |
+
"source": [
|
579 |
+
"import pandas as pd\n",
|
580 |
+
"\n",
|
581 |
+
"df = pd.read_csv(\"hf://datasets/synavate/medical_records_did/data_did.csv\")"
|
582 |
+
]
|
583 |
+
},
|
584 |
+
{
|
585 |
+
"cell_type": "code",
|
586 |
+
"execution_count": 93,
|
587 |
+
"metadata": {},
|
588 |
+
"outputs": [
|
589 |
+
{
|
590 |
+
"data": {
|
591 |
+
"text/plain": [
|
592 |
+
"Index(['last_name', 'first_name', 'full_name', 'birthday', 'gender', 'type',\n",
|
593 |
+
" 'state', 'district', 'senate_class', 'party', 'url', 'address', 'phone',\n",
|
594 |
+
" 'contact_form', 'rss_url', 'twitter', 'facebook', 'youtube',\n",
|
595 |
+
" 'youtube_id', 'bioguide_id', 'thomas_id', 'opensecrets_id', 'lis_id',\n",
|
596 |
+
" 'fec_ids', 'cspan_id', 'govtrack_id', 'votesmart_id', 'ballotpedia_id',\n",
|
597 |
+
" 'washington_post_id', 'icpsr_id', 'wikipedia_id', 'last_name.1',\n",
|
598 |
+
" 'first_name.1', 'middle_name.1', 'suffix.1', 'nickname.1',\n",
|
599 |
+
" 'full_name.1', 'birthday.1', 'gender.1', 'type.1', 'state.1',\n",
|
600 |
+
" 'district.1', 'senate_class.1', 'party.1', 'url.1', 'address.1',\n",
|
601 |
+
" 'phone.1', 'contact_form.1', 'rss_url.1', 'twitter.1', 'facebook.1',\n",
|
602 |
+
" 'youtube.1', 'youtube_id.1', 'bioguide_id.1', 'thomas_id.1',\n",
|
603 |
+
" 'opensecrets_id.1', 'lis_id.1', 'fec_ids.1', 'cspan_id.1',\n",
|
604 |
+
" 'govtrack_id.1', 'votesmart_id.1', 'ballotpedia_id.1',\n",
|
605 |
+
" 'washington_post_id.1', 'icpsr_id.1', 'wikipedia_id.1'],\n",
|
606 |
+
" dtype='object')"
|
607 |
+
]
|
608 |
+
},
|
609 |
+
"execution_count": 93,
|
610 |
+
"metadata": {},
|
611 |
+
"output_type": "execute_result"
|
612 |
+
}
|
613 |
+
],
|
614 |
+
"source": [
|
615 |
+
"df_drop=df.copy()\n",
|
616 |
+
"df_drop.isnull().drop(index=1)\n",
|
617 |
+
"df_drop.isna().sum()\n",
|
618 |
+
"df_drop.drop(columns=['Unnamed: 0', 'middle_name', 'suffix', 'nickname'], inplace=True)\n",
|
619 |
+
"df_drop.columns"
|
620 |
+
]
|
621 |
+
},
|
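The listing above shows every column duplicated with a `.1` suffix, which typically means two copies of the same table were concatenated side by side; a sketch for keeping a single copy:

```python
# drop the duplicated *.1 columns, keeping the originals
df_clean = df_drop.loc[:, ~df_drop.columns.str.endswith(".1")]
print(df_clean.shape)
```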
622 |
+
{
|
623 |
+
"cell_type": "code",
|
624 |
+
"execution_count": null,
|
625 |
+
"metadata": {},
|
626 |
+
"outputs": [],
|
627 |
+
"source": []
|
628 |
+
},
|
629 |
+
{
|
630 |
+
"cell_type": "code",
|
631 |
+
"execution_count": null,
|
632 |
+
"metadata": {},
|
633 |
+
"outputs": [],
|
634 |
+
"source": [
|
635 |
+
"!pip install scikit-learn\n",
|
636 |
+
"from sklearn import train_test_split\n",
|
637 |
+
"\n"
|
638 |
+
]
|
639 |
+
}
|
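With the corrected import, a minimal split sketch for the cleaned frame (`party` is used as an illustrative target column from the listing above, not a recorded modeling choice):

```python
from sklearn.model_selection import train_test_split

X = df_drop.drop(columns=["party"])
y = df_drop["party"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
```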
640 |
+
],
|
641 |
+
"metadata": {
|
642 |
+
"kernelspec": {
|
643 |
+
"display_name": "Python 3 (ipykernel)",
|
644 |
+
"language": "python",
|
645 |
+
"name": "python3"
|
646 |
+
},
|
647 |
+
"language_info": {
|
648 |
+
"codemirror_mode": {
|
649 |
+
"name": "ipython",
|
650 |
+
"version": 3
|
651 |
+
},
|
652 |
+
"file_extension": ".py",
|
653 |
+
"mimetype": "text/x-python",
|
654 |
+
"name": "python",
|
655 |
+
"nbconvert_exporter": "python",
|
656 |
+
"pygments_lexer": "ipython3",
|
657 |
+
"version": "3.11.9"
|
658 |
+
}
|
659 |
+
},
|
660 |
+
"nbformat": 4,
|
661 |
+
"nbformat_minor": 2
|
662 |
+
}
|
gretel_ai/gretel_exp.md
ADDED
@@ -0,0 +1,4 @@
1 |
+
# Using Gretel AI to Redact PII
|
2 |
+
### Applying Gretel.ai's differential privacy to the equivalent dataset
|
3 |
+
|
4 |
+
|
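As a placeholder for the experiment notes, a minimal sketch using the classic `gretel_client` project API; the project name, config template, and file path are assumptions, not recorded settings:

```python
from gretel_client import configure_session
from gretel_client.projects import create_or_get_unique_project
from gretel_client.projects.models import read_model_config

configure_session(api_key="prompt", cache="yes", validate=True)

project = create_or_get_unique_project(name="verida-pii-redaction")  # hypothetical name
config = read_model_config("transform/default")  # assumed built-in transform template
model = project.create_model_obj(model_config=config, data_source="data_did.csv")
model.submit_cloud()  # poll the returned job for the redacted output
```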
medical_records_did
ADDED
@@ -0,0 +1 @@
1 |
+
Subproject commit 7d84b291818a39727cd0e0dfe1ccad2c4bbda2dd
|
pyproject.toml
ADDED
@@ -0,0 +1,30 @@
1 |
+
[tool.poetry]
|
2 |
+
name = "verida-differential-privacy"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = "A demonstration of using the PyTorch differential privacy library and a login DiD"
|
5 |
+
authors = ["snyata <[email protected]>"]
|
6 |
+
license = "MIT"
|
7 |
+
readme = "README.md"
|
8 |
+
|
9 |
+
[tool.poetry.dependencies]
|
10 |
+
python = "^3.10"
|
11 |
+
pydantic = "^2.8.2"
|
12 |
+
wandb = "^0.17.7"
|
13 |
+
python-dotenv = "^1.0.1"
|
14 |
+
ipykernel = "^6.29.5"
|
15 |
+
datasets = "^2.21.0"
|
16 |
+
didkit = "^0.3.3"
|
17 |
+
scikit-learn = "^1.5.1"
|
18 |
+
matplotlib = "^3.9.2"
|
19 |
+
seaborn = "^0.13.2"
|
20 |
+
|
21 |
+
[tool.poetry.group.dev.dependencies]
|
22 |
+
pytest = "8.3.2"
|
23 |
+
pip-tools = "^7.4.1"
|
24 |
+
isort = "^5.13.2"
|
26 |
+
jupyter = "^1.0.0"
|
27 |
+
|
28 |
+
[build-system]
|
29 |
+
requires = ["poetry-core"]
|
30 |
+
build-backend = "poetry.core.masonry.api"
|
src/lstm.ipynb
ADDED
File without changes
|
transformer_models/unsloth_model_colab.ipynb
ADDED
@@ -0,0 +1,466 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"# PyTorch Differential Privacy Experiment"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "code",
|
12 |
+
"execution_count": null,
|
13 |
+
"metadata": {},
|
14 |
+
"outputs": [],
|
15 |
+
"source": [
|
16 |
+
"%pip install -qqq torch torchvision opacus numpy pandas\n",
|
17 |
+
"%pip install -qqq wandb datasets tqdm\n"
|
18 |
+
]
|
19 |
+
},
|
20 |
+
{
|
21 |
+
"cell_type": "code",
|
22 |
+
"execution_count": null,
|
23 |
+
"metadata": {},
|
24 |
+
"outputs": [],
|
25 |
+
"source": [
|
26 |
+
"import os\n",
|
27 |
+
"import torch \n",
|
28 |
+
"from dotenv import load_dotenv\n",
|
29 |
+
"import wandb \n",
|
30 |
+
"import logging\n",
|
31 |
+
"import shutil\n",
|
32 |
+
"import sys\n",
|
33 |
+
"from datetime import datetime, timedelta\n",
|
34 |
+
"\n",
|
35 |
+
"import argparse\n",
|
36 |
+
"from collections import Counter\n",
|
37 |
+
"from pathlib import Path\n",
|
38 |
+
"from statistics import mean\n",
|
39 |
+
"\n",
|
40 |
+
"import torch\n",
|
41 |
+
"import torch.nn as nn\n",
|
42 |
+
"from opacus import PrivacyEngine\n",
|
43 |
+
"from opacus.layers import DPGRU, DPLSTM, DPRNN\n",
|
44 |
+
"from torch.nn.utils.rnn import pad_sequence\n",
|
45 |
+
"from torch.utils.data import DataLoader, Dataset\n",
|
46 |
+
"from tqdm import tqdm, tqdm_notebook"
|
47 |
+
]
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"cell_type": "code",
|
51 |
+
"execution_count": null,
|
52 |
+
"metadata": {},
|
53 |
+
"outputs": [],
|
54 |
+
"source": [
|
55 |
+
"device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n",
|
56 |
+
"if os.path.exists('.env'):\n",
|
57 |
+
" load_dotenv('.env')\n",
|
58 |
+
"device\n"
|
59 |
+
]
|
60 |
+
},
|
61 |
+
{
|
62 |
+
"cell_type": "code",
|
63 |
+
"execution_count": null,
|
64 |
+
"metadata": {},
|
65 |
+
"outputs": [],
|
66 |
+
"source": [
|
67 |
+
"logging.basicConfig(\n",
|
68 |
+
" format=\"%(asctime)s:%(levelname)s:%(message)s\",\n",
|
69 |
+
" datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
|
70 |
+
" stream=sys.stdout,\n",
|
71 |
+
")\n",
|
72 |
+
"logger = logging.getLogger(\"ddp\")\n",
|
73 |
+
"logger.setLevel(level=logging.INFO)\n"
|
74 |
+
]
|
75 |
+
},
|
76 |
+
{
|
77 |
+
"cell_type": "code",
|
78 |
+
"execution_count": null,
|
79 |
+
"metadata": {},
|
80 |
+
"outputs": [],
|
81 |
+
"source": [
|
82 |
+
"wandb.login(key=os.getenv('WANDB_API_KEY'))\n",
|
83 |
+
"wandb.init(project=\"verida-pii\", name=\"deberta_finetune\")"
|
84 |
+
]
|
85 |
+
},
|
86 |
+
{
|
87 |
+
"cell_type": "markdown",
|
88 |
+
"metadata": {},
|
89 |
+
"source": [
|
90 |
+
"# Fine Tuning w/ Unsloth (Colab Only)"
|
91 |
+
]
|
92 |
+
},
|
93 |
+
{
|
94 |
+
"cell_type": "code",
|
95 |
+
"execution_count": null,
|
96 |
+
"metadata": {},
|
97 |
+
"outputs": [],
|
98 |
+
"source": [
|
99 |
+
"# Datasets\n",
|
100 |
+
"# Original data_name = 'Ezi/medical_and_legislators_synthetic'\n",
|
101 |
+
"# Tutorial: https://huggingface.co/blog/Andyrasika/finetune-unsloth-qlora\n"
|
102 |
+
]
|
103 |
+
},
|
104 |
+
{
|
105 |
+
"cell_type": "code",
|
106 |
+
"execution_count": null,
|
107 |
+
"metadata": {},
|
108 |
+
"outputs": [],
|
109 |
+
"source": [
|
110 |
+
"# Get the major and minor version of the current CUDA device (GPU)\n",
|
111 |
+
"major_version, minor_version = torch.cuda.get_device_capability()\n",
|
112 |
+
"\n",
|
113 |
+
"# Apply the following if the GPU has Ampere or Hopper architecture (RTX 30xx, RTX 40xx, A100, H100, L40, etc.)\n",
|
114 |
+
"if major_version >= 8:\n",
|
115 |
+
" # Install the Unsloth library for Ampere and Hopper architecture from GitHub\n",
|
116 |
+
" !pip install \"unsloth[colab_ampere] @ git+https://github.com/unslothai/unsloth.git\" -q\n",
|
117 |
+
"\n",
|
118 |
+
"# Apply the following for older GPUs (V100, Tesla T4, RTX 20xx, etc.)\n",
|
119 |
+
"else:\n",
|
120 |
+
" # Install the Unsloth library for older GPUs from GitHub\n",
|
121 |
+
" !pip install \"unsloth[colab] @ git+https://github.com/unslothai/unsloth.git\" -q\n",
|
122 |
+
"\n",
|
123 |
+
"# Placeholder statement (does nothing)\n",
|
124 |
+
"pass\n",
|
125 |
+
"\n",
|
126 |
+
"# Install the Hugging Face Transformers library from GitHub, which allows native 4-bit loading\n",
|
127 |
+
"!pip install \"git+https://github.com/huggingface/transformers.git\" -q\n",
|
128 |
+
"\n"
|
129 |
+
]
|
130 |
+
},
|
131 |
+
{
|
132 |
+
"cell_type": "code",
|
133 |
+
"execution_count": null,
|
134 |
+
"metadata": {},
|
135 |
+
"outputs": [],
|
136 |
+
"source": [
|
137 |
+
"from transformers import AutoTokenizer, AutoModelForTokenClassification\n",
|
138 |
+
"\n",
|
139 |
+
"tokenizer = AutoTokenizer.from_pretrained(\"lakshyakh93/deberta_finetuned_pii\")\n",
|
140 |
+
"model = AutoModelForTokenClassification.from_pretrained(\"lakshyakh93/deberta_finetuned_pii\")"
|
141 |
+
]
|
142 |
+
},
|
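A quick sanity check of the PII model before any fine-tuning, via a token-classification pipeline (the sentence is illustrative):

```python
from transformers import pipeline

pii = pipeline(
    "token-classification",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)
print(pii("John Smith lives at 42 Baker Street and his phone is 555-0192."))
```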
143 |
+
{
|
144 |
+
"cell_type": "code",
|
145 |
+
"execution_count": null,
|
146 |
+
"metadata": {},
|
147 |
+
"outputs": [],
|
148 |
+
"source": [
|
149 |
+
"model = FastLanguageModel.get_peft_model(\n",
|
150 |
+
" model,\n",
|
151 |
+
" # Specify the existing model\n",
|
152 |
+
"\n",
|
153 |
+
" r=16, # Choose any positive number! Recommended values include 8, 16, 32, 64, 128, etc.\n",
|
154 |
+
" # Rank parameter for LoRA. The smaller this value, the fewer parameters will be modified.\n",
|
155 |
+
"\n",
|
156 |
+
" target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
|
157 |
+
" \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
|
158 |
+
" # Specify the modules to which LoRA will be applied\n",
|
159 |
+
"\n",
|
160 |
+
" lora_alpha=16,\n",
|
161 |
+
" # Alpha parameter for LoRA. This value determines the strength of the applied LoRA.\n",
|
162 |
+
"\n",
|
163 |
+
" lora_dropout=0, # Currently, only supports dropout = 0\n",
|
164 |
+
" # Dropout rate for LoRA. Currently supports only 0.\n",
|
165 |
+
"\n",
|
166 |
+
" bias=\"none\", # Currently, only supports bias = \"none\"\n",
|
167 |
+
" # Bias usage setting. Currently supports only the setting without bias.\n",
|
168 |
+
"\n",
|
169 |
+
" use_gradient_checkpointing=True,\n",
|
170 |
+
" # Whether to use gradient checkpointing to improve memory efficiency\n",
|
171 |
+
"\n",
|
172 |
+
" random_state=3407,\n",
|
173 |
+
" # Seed value for random number generation\n",
|
174 |
+
"\n",
|
175 |
+
" max_seq_length=max_seq_length,\n",
|
176 |
+
" # Set the maximum sequence length\n",
|
177 |
+
")"
|
178 |
+
]
|
179 |
+
},
|
180 |
+
{
|
181 |
+
"cell_type": "code",
|
182 |
+
"execution_count": null,
|
183 |
+
"metadata": {},
|
184 |
+
"outputs": [],
|
185 |
+
"source": [
|
186 |
+
"# @TODO - Add the relevant prompt\n",
|
187 |
+
"alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n",
|
188 |
+
"\n",
|
189 |
+
"### Instruction:\n",
|
190 |
+
"{}\n",
|
191 |
+
"\n",
|
192 |
+
"### Input:\n",
|
193 |
+
"{}\n",
|
194 |
+
"\n",
|
195 |
+
"### Response:\n",
|
196 |
+
"{}\"\"\"\n",
|
197 |
+
"# Define the prompt format for the Alpaca dataset\n",
|
198 |
+
"\n",
|
199 |
+
"def formatting_prompts_func(examples):\n",
|
200 |
+
" # Define a function to format each example in the dataset\n",
|
201 |
+
"\n",
|
202 |
+
" instructions = examples[\"instruction\"]\n",
|
203 |
+
" inputs = examples[\"input\"]\n",
|
204 |
+
" outputs = examples[\"output\"]\n",
|
205 |
+
" # Get instructions, inputs, and outputs\n",
|
206 |
+
"\n",
|
207 |
+
" texts = []\n",
|
208 |
+
" for instruction, input, output in zip(instructions, inputs, outputs):\n",
|
209 |
+
" # Generate text by combining instructions, inputs, and outputs\n",
|
210 |
+
"\n",
|
211 |
+
" text = alpaca_prompt.format(instruction, input, output)\n",
|
212 |
+
" # Format the text according to the prompt format\n",
|
213 |
+
"\n",
|
214 |
+
" texts.append(text)\n",
|
215 |
+
" return { \"text\" : texts, }\n",
|
216 |
+
" # Return a list of formatted texts\n",
|
217 |
+
"\n",
|
218 |
+
"pass\n",
|
219 |
+
"# Placeholder (does nothing)\n",
|
220 |
+
"\n",
|
221 |
+
"from datasets import load_dataset\n",
|
222 |
+
"# Import the load_dataset function from the datasets library\n",
|
223 |
+
"\n",
|
224 |
+
"dataset = load_dataset(\"yahma/alpaca-cleaned\", split=\"train\")\n",
|
225 |
+
"# Load the training data of the cleaned version of the Alpaca dataset from yahma\n",
|
226 |
+
"\n",
|
227 |
+
"dataset = dataset.map(formatting_prompts_func, batched=True,)"
|
228 |
+
]
|
229 |
+
},
|
230 |
+
{
|
231 |
+
"cell_type": "code",
|
232 |
+
"execution_count": null,
|
233 |
+
"metadata": {},
|
234 |
+
"outputs": [],
|
235 |
+
"source": [
|
236 |
+
"from trl import SFTTrainer\n",
|
237 |
+
"# Import SFTTrainer from the TRL library\n",
|
238 |
+
"\n",
|
239 |
+
"from transformers import TrainingArguments\n",
|
240 |
+
"# Import TrainingArguments from the Transformers library\n",
|
241 |
+
"\n",
|
242 |
+
"trainer = SFTTrainer(\n",
|
243 |
+
" # Initialize the SFTTrainer\n",
|
244 |
+
"\n",
|
245 |
+
" model=model,\n",
|
246 |
+
" # Specify the model to be used\n",
|
247 |
+
"\n",
|
248 |
+
" train_dataset=dataset,\n",
|
249 |
+
" # Specify the training dataset\n",
|
250 |
+
"\n",
|
251 |
+
" dataset_text_field=\"text\",\n",
|
252 |
+
" # Specify the text field in the dataset\n",
|
253 |
+
"\n",
|
254 |
+
" max_seq_length=max_seq_length,\n",
|
255 |
+
" # Specify the maximum sequence length\n",
|
256 |
+
"\n",
|
257 |
+
" args=TrainingArguments(\n",
|
258 |
+
" # Specify training arguments\n",
|
259 |
+
"\n",
|
260 |
+
" per_device_train_batch_size=2,\n",
|
261 |
+
" # Specify the training batch size per device\n",
|
262 |
+
"\n",
|
263 |
+
" gradient_accumulation_steps=4,\n",
|
264 |
+
" # Specify the number of steps for gradient accumulation\n",
|
265 |
+
"\n",
|
266 |
+
" warmup_steps=5,\n",
|
267 |
+
" # Specify the number of warm-up steps\n",
|
268 |
+
"\n",
|
269 |
+
" max_steps=20,\n",
|
270 |
+
" # Specify the maximum number of steps\n",
|
271 |
+
"\n",
|
272 |
+
" learning_rate=2e-4,\n",
|
273 |
+
" # Specify the learning rate\n",
|
274 |
+
"\n",
|
275 |
+
" fp16=not torch.cuda.is_bf16_supported(),\n",
|
276 |
+
" # Set whether to use 16-bit floating-point precision (fp16)\n",
|
277 |
+
"\n",
|
278 |
+
" bf16=torch.cuda.is_bf16_supported(),\n",
|
279 |
+
" # Set whether to use Bfloat16\n",
|
280 |
+
"\n",
|
281 |
+
" logging_steps=1,\n",
|
282 |
+
" # Specify the logging steps\n",
|
283 |
+
"\n",
|
284 |
+
" optim=\"adamw_8bit\",\n",
|
285 |
+
" # Specify the optimizer (here using 8-bit AdamW)\n",
|
286 |
+
"\n",
|
287 |
+
" weight_decay=0.01,\n",
|
288 |
+
" # Specify the weight decay value\n",
|
289 |
+
"\n",
|
290 |
+
" lr_scheduler_type=\"linear\",\n",
|
291 |
+
" # Specify the type of learning rate scheduler (linear)\n",
|
292 |
+
"\n",
|
293 |
+
" seed=3407,\n",
|
294 |
+
" # Specify the random seed\n",
|
295 |
+
"\n",
|
296 |
+
" output_dir=\"outputs\",\n",
|
297 |
+
" # Specify the output directory\n",
|
298 |
+
"\n",
|
299 |
+
" ),\n",
|
300 |
+
")"
|
301 |
+
]
|
302 |
+
},
|
303 |
+
{
|
304 |
+
"cell_type": "code",
|
305 |
+
"execution_count": null,
|
306 |
+
"metadata": {},
|
307 |
+
"outputs": [],
|
308 |
+
"source": [
|
309 |
+
"gpu_stats = torch.cuda.get_device_properties(0)\n",
|
310 |
+
"# Get properties of the GPU device at index 0\n",
|
311 |
+
"\n",
|
312 |
+
"start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n",
|
313 |
+
"# Get the maximum reserved GPU memory in GB and round to 3 decimal places\n",
|
314 |
+
"\n",
|
315 |
+
"max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n",
|
316 |
+
"# Get the total GPU memory in GB and round to 3 decimal places\n",
|
317 |
+
"\n",
|
318 |
+
"print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n",
|
319 |
+
"# Display the GPU name and maximum memory\n",
|
320 |
+
"\n",
|
321 |
+
"print(f\"{start_gpu_memory} GB of memory reserved.\")\n",
|
322 |
+
"# Display the reserved memory amount"
|
323 |
+
]
|
324 |
+
},
|
325 |
+
{
|
326 |
+
"cell_type": "code",
|
327 |
+
"execution_count": null,
|
328 |
+
"metadata": {},
|
329 |
+
"outputs": [],
|
330 |
+
"source": [
|
331 |
+
"trainer_stats = trainer.train()"
|
332 |
+
]
|
333 |
+
},
|
334 |
+
{
|
335 |
+
"cell_type": "markdown",
|
336 |
+
"metadata": {},
|
337 |
+
"source": [
|
338 |
+
"# Convert to GGUF"
|
339 |
+
]
|
340 |
+
},
|
341 |
+
{
|
342 |
+
"cell_type": "code",
|
343 |
+
"execution_count": null,
|
344 |
+
"metadata": {},
|
345 |
+
"outputs": [],
|
346 |
+
"source": [
|
347 |
+
"def colab_quantize_to_gguf(save_directory, quantization_method=\"q4_k_m\"):\n",
|
348 |
+
" # Define a function for conversion to GGUF\n",
|
349 |
+
"\n",
|
350 |
+
" from transformers.models.llama.modeling_llama import logger\n",
|
351 |
+
" import os\n",
|
352 |
+
" # Import necessary libraries\n",
|
353 |
+
"\n",
|
354 |
+
" logger.warning_once(\n",
|
355 |
+
" \"Unsloth: `colab_quantize_to_gguf` is still in development mode.\\n\"\\\n",
|
356 |
+
" \"If anything errors or breaks, please file a ticket on Github.\\n\"\\\n",
|
357 |
+
" \"Also, if you used this successfully, please tell us on Discord!\"\n",
|
358 |
+
" )\n",
|
359 |
+
" # Warn that it's still in development mode and encourage reporting any issues\n",
|
360 |
+
"\n",
|
361 |
+
" # From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html\n",
|
362 |
+
" ALLOWED_QUANTS = \\\n",
|
363 |
+
" {\n",
|
364 |
+
" # Define currently allowed quantization methods\n",
|
365 |
+
" # Including descriptions for each quantization method\n",
|
366 |
+
" \"q2_k\" : \"Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.\",\n",
|
367 |
+
" \"q3_k_l\" : \"Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K\",\n",
|
368 |
+
" \"q3_k_m\" : \"Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K\",\n",
|
369 |
+
" \"q3_k_s\" : \"Uses Q3_K for all tensors\",\n",
|
370 |
+
" \"q4_0\" : \"Original quant method, 4-bit.\",\n",
|
371 |
+
" \"q4_1\" : \"Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.\",\n",
|
372 |
+
" \"q4_k_m\" : \"Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K\",\n",
|
373 |
+
" \"q4_k_s\" : \"Uses Q4_K for all tensors\",\n",
|
374 |
+
" \"q5_0\" : \"Higher accuracy, higher resource usage and slower inference.\",\n",
|
375 |
+
" \"q5_1\" : \"Even higher accuracy, resource usage and slower inference.\",\n",
|
376 |
+
" \"q5_k_m\" : \"Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K\",\n",
|
377 |
+
" \"q5_k_s\" : \"Uses Q5_K for all tensors\",\n",
|
378 |
+
" \"q6_k\" : \"Uses Q8_K for all tensors\",\n",
|
379 |
+
" \"q8_0\" : \"Almost indistinguishable from float16. High resource use and slow. Not recommended for most users.\",\n",
|
380 |
+
" }\n",
|
381 |
+
"\n",
|
382 |
+
" if quantization_method not in ALLOWED_QUANTS.keys():\n",
|
383 |
+
" # If the specified quantization method is not allowed, raise an error\n",
|
384 |
+
" error = f\"Unsloth: Quant method = [{quantization_method}] not supported. Choose from below:\\n\"\n",
|
385 |
+
" for key, value in ALLOWED_QUANTS.items():\n",
|
386 |
+
" error += f\"[{key}] => {value}\\n\"\n",
|
387 |
+
" raise RuntimeError(error)\n",
|
388 |
+
"\n",
|
389 |
+
" # Display information about the conversion\n",
|
390 |
+
" print_info = \\\n",
|
391 |
+
" f\"==((====))== Unsloth: Conversion from QLoRA to GGUF information\\n\"\\\n",
|
392 |
+
" f\" \\\\\\ /| [0] Installing llama.cpp will take 3 minutes.\\n\"\\\n",
|
393 |
+
" f\"O^O/ \\_/ \\\\ [1] Converting HF to GUUF 16bits will take 3 minutes.\\n\"\\\n",
|
394 |
+
" f\"\\ / [2] Converting GGUF 16bits to q4_k_m will take 20 minutes.\\n\"\\\n",
|
395 |
+
" f' \"-____-\" In total, you will have to wait around 26 minutes.\\n'\n",
|
396 |
+
" print(print_info)\n",
|
397 |
+
" # Display information about the conversion process\n",
|
398 |
+
"\n",
|
399 |
+
" if not os.path.exists(\"llama.cpp\"):\n",
|
400 |
+
" # If llama.cpp does not exist, install it\n",
|
401 |
+
" print(\"Unsloth: [0] Installing llama.cpp. This will take 3 minutes...\")\n",
|
402 |
+
" !git clone https://github.com/ggerganov/llama.cpp\n",
|
403 |
+
" !cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j\n",
|
404 |
+
" !pip install gguf protobuf\n",
|
405 |
+
" pass\n",
|
406 |
+
"\n",
|
407 |
+
" print(\"Unsloth: Starting conversion from HF to GGUF 16bit...\")\n",
|
408 |
+
" # Display that conversion from HF to GGUF 16bit is starting\n",
|
409 |
+
" # print(\"Unsloth: [1] Converting HF into GGUF 16bit. This will take 3 minutes...\")\n",
|
410 |
+
" !python llama.cpp/convert.py {save_directory} \\\n",
|
411 |
+
" --outfile {save_directory}-unsloth.gguf \\\n",
|
412 |
+
" --outtype f16\n",
|
413 |
+
"\n",
|
414 |
+
" print(\"Unsloth: Starting conversion from GGUF 16bit to q4_k_m...\")\n",
|
415 |
+
" # Display that conversion from GGUF 16bit to the specified quantization method is starting\n",
|
416 |
+
" # print(\"Unsloth: [2] Converting GGUF 16bit into q4_k_m. This will take 20 minutes...\")\n",
|
417 |
+
" final_location = f\"./{save_directory}-{quantization_method}-unsloth.gguf\"\n",
|
418 |
+
" !./llama.cpp/quantize ./{save_directory}-unsloth.gguf \\\n",
|
419 |
+
" {final_location} {quantization_method}\n",
|
420 |
+
"\n",
|
421 |
+
" print(f\"Unsloth: Output location: {final_location}\")\n",
|
422 |
+
" # Display the output location of the converted file\n",
|
423 |
+
"pass"
|
424 |
+
]
|
425 |
+
},
|
426 |
+
{
|
427 |
+
"cell_type": "code",
|
428 |
+
"execution_count": null,
|
429 |
+
"metadata": {},
|
430 |
+
"outputs": [],
|
431 |
+
"source": [
|
432 |
+
"from unsloth import unsloth_save_model\n",
|
433 |
+
"# Import the unsloth_save_model function from the Unsloth library\n",
|
434 |
+
"\n",
|
435 |
+
"# unsloth_save_model has the same args as model.save_pretrained\n",
|
436 |
+
"# unsloth_save_model has the same arguments as model.save_pretrained\n",
|
437 |
+
"unsloth_save_model(model, tokenizer, \"output_model\", push_to_hub=False, token=None)\n",
|
438 |
+
"# Save the model and tokenizer as \"output_model\". Do not push to the Hugging Face Hub\n",
|
439 |
+
"\n",
|
440 |
+
"colab_quantize_to_gguf(\"output_model\", quantization_method=\"q4_k_m\")\n",
|
441 |
+
"# Convert \"output_model\" to GGUF format. Use the quantization method \"q4_k_m\""
|
442 |
+
]
|
443 |
+
}
|
444 |
+
],
|
445 |
+
"metadata": {
|
446 |
+
"kernelspec": {
|
447 |
+
"display_name": "Python 3 (ipykernel)",
|
448 |
+
"language": "python",
|
449 |
+
"name": "python3"
|
450 |
+
},
|
451 |
+
"language_info": {
|
452 |
+
"codemirror_mode": {
|
453 |
+
"name": "ipython",
|
454 |
+
"version": 3
|
455 |
+
},
|
456 |
+
"file_extension": ".py",
|
457 |
+
"mimetype": "text/x-python",
|
458 |
+
"name": "python",
|
459 |
+
"nbconvert_exporter": "python",
|
460 |
+
"pygments_lexer": "ipython3",
|
461 |
+
"version": "3.11.9"
|
462 |
+
}
|
463 |
+
},
|
464 |
+
"nbformat": 4,
|
465 |
+
"nbformat_minor": 2
|
466 |
+
}
|