#!/usr/bin/python
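"""preprocess_data.py

Preprocesses the Adult (census income) training data: loads the raw features,
binarizes the labels, perturbs the categorical columns with missing entries at
several ratios, imputes each perturbed set with every configured method,
standardizes the continuous columns, one-hot encodes the categorical columns,
and saves the resulting feature matrices, labels and fitted scalers to disk.
"""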

import numpy as np
import cPickle as pkl
import os
import random

from scipy import delete
from sklearn.preprocessing import StandardScaler

from missing_data_imputation import Imputer
from processing import impute, perturb_data
from params import adult_params, scalers_folder
from params import feats_train_folder, labels_train_folder, perturb_folder
from params import rand_num_seed

np.random.seed(rand_num_seed)
random.seed(rand_num_seed)
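
# adult_params (imported from params.py) is expected to provide at least the
# keys this script looks up below: 'cat_cols', 'non_cat_cols',
# 'miss_data_symbol', 'mnar_values' and 'imp_methods'.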

# load features and labels
x = np.genfromtxt('data/adult-train-raw', delimiter=', ', dtype=object)

# remove holland from data to match feature size in test data
x = x[x[:, -2] != 'Holand-Netherlands']

# binarize labels
labels = (np.array(x[:, -1]) == '>50K').astype(int)
labels = labels.reshape((-1, 1))

# save labels in binary representation
labels.dump(os.path.join(labels_train_folder, 'labels_bin.np'))

# remove redundant education-number and labels features
x = delete(x, (4, 14), 1)

# enumerate parameters
monotone = True
ratios = np.arange(0, .5, .1)
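
# np.arange(0, .5, .1) yields perturbation ratios 0.0, 0.1, 0.2, 0.3 and 0.4,
# i.e. roughly 0% to 40% of the categorical cells in 10% steps.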

for ratio in ratios:
    print '\nPerturbing {:.0f}% of data'.format(100 * ratio)
    if ratio > 0:
        pert_data, _ = perturb_data(x, adult_params['cat_cols'], ratio, monotone,
                                    adult_params['miss_data_symbol'],
                                    adult_params['mnar_values'])
    else:
        pert_data = x

    # report the number of categorical cells set to the missing-data symbol
    # out of the total number of categorical cells
    print "\tRatio is {} of {}".format(
        np.sum(pert_data == adult_params['miss_data_symbol']),
        len(pert_data) * len(adult_params['cat_cols']))

    path = os.path.join(perturb_folder,
                        'adult_train_pert_mono_{}_ratio_{}.csv'.format(monotone,
                                                                       ratio))
    # save perturbed data to disk as csv
    print '\tSaving perturbed data to {}'.format(path)
    np.savetxt(path, pert_data, delimiter=",", fmt="%s")

    for imp_method in adult_params['imp_methods']:
        print '\tImputing with {}'.format(imp_method)
        imp = Imputer()
        data = impute(pert_data, imp, imp_method, adult_params)
        path = "data/imputed/{}_mono_{}_ratio_{}.csv".format(imp_method,
                                                             monotone,
                                                             ratio)
        # save data as csv
        print '\tSaving imputed data to {}'.format(path)
        np.savetxt(path, data, delimiter=",", fmt="%s")

        # scale continuous variables and convert categorical to one-hot
        # store the scaler objects to be used on the test set
        scaler_path = os.path.join(scalers_folder,
                                   "{}_scaler".format(imp_method))
        if os.path.isfile(scaler_path):
            scaler_dict = pkl.load(open(scaler_path, "rb"))
        else:
            scaler_dict = {}

        scaler = StandardScaler()
        scaler = scaler.fit(data[:, adult_params['non_cat_cols']].astype(float))
        data_scaled = np.copy(data)
        data_scaled[:, adult_params['non_cat_cols']] = scaler.transform(
            data[:, adult_params['non_cat_cols']].astype(float))
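
        # StandardScaler standardizes each continuous (non_cat_cols) column to
        # zero mean and unit variance, z = (x - mean) / std; the categorical
        # columns are left as-is here and one-hot encoded below.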

        # key is imputation method and ratio dependent
        # filename is imputation method dependent
        scaler_dict["{}_ratio_{}".format(imp_method, ratio)] = scaler
        pkl.dump(scaler_dict, open(scaler_path, 'wb'))
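
        # Sketch of how the dumped scaler would presumably be reused on the
        # test split (hypothetical `test` array, same params and key layout
        # assumed):
        #   scaler_dict = pkl.load(open(scaler_path, 'rb'))
        #   scaler = scaler_dict['{}_ratio_{}'.format(imp_method, ratio)]
        #   test[:, adult_params['non_cat_cols']] = scaler.transform(
        #       test[:, adult_params['non_cat_cols']].astype(float))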

        # binarize scaled data
        data_scaled_bin = imp.binarize_data(data_scaled,
                                            adult_params['cat_cols'],
                                            adult_params['miss_data_symbol'])
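        # binarize_data presumably expands each categorical column into one
        # indicator column per category (the one-hot conversion referred to in
        # the comment above); passing the missing-data symbol is assumed to let
        # it recognize perturbed entries (see missing_data_imputation.Imputer).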

        # convert to float
        data_scaled_bin = data_scaled_bin.astype(float)

        # add labels as last column
        data_scaled_bin = np.hstack((data_scaled_bin, labels))

        # save to disk
        filename = "{}_bin_scaled_mono_{}_ratio_{}.np".format(imp_method,
                                                              monotone,
                                                              ratio)
        path = os.path.join(feats_train_folder, filename)
        print '\tSaving imputed scaled and binarized data to {}'.format(path)
        data_scaled_bin.dump(path)
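
# Usage sketch (an assumption): with data/adult-train-raw present and the
# output folders from params.py created, run under Python 2 (the script uses
# cPickle and print statements), e.g.:
#   python preprocess_data.py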