"""
Code for the "Classifier Stacking Approaches" experiments (Section 6.3.1).
The reported results were retrieved manually from the experiment logs.
"""
import argparse
import logging
import os

import numpy as np
from cuml.preprocessing import StandardScaler  # GPU-accelerated scaler from RAPIDS cuML
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.utils import compute_class_weight

from classifier_vs_feature_experiment import get_tuned_classifiers
from src.util.data_paths import get_data_path
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Score Fusion Stacking Experiments')
    parser.add_argument('--experiment-dir', type=str,
                        help='Directory for checkpoint and log files',
                        default='/local/scratch/ptanner/stacking_experiments')
    args = parser.parse_args()

    # Make sure the experiment directory exists before attaching the file handler.
    os.makedirs(args.experiment_dir, exist_ok=True)

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        handlers=[
                            logging.FileHandler(f'{args.experiment_dir}/experiment.log'),
                            logging.StreamHandler()
                        ])
logger.info("Starting Experiment")
y_train = np.load('y_train.npy')
y_val = np.load('y_val.npy')
y_test = np.load('y_test.npy')
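    # sklearn's 'balanced' heuristic weights each class by
    # n_samples / (n_classes * count(class)), which counteracts
    # class imbalance in the training labels.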
    class_weights = compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
    # Assumes the labels are the contiguous integers 0..n_classes-1.
    class_weights = {i: class_weights[i] for i in range(len(class_weights))}
    features = ['landmarks_3d', 'hog', 'nonrigid_face_shape', 'facs', 'embeddings']
    classifier_names = ['LogisticRegression', 'NN', 'SVC', 'MLP', 'RandomForest']
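    # Checkpointing scheme: all intermediate results live in plain dicts that are
    # pickled to .npy files inside the experiment directory, so an interrupted run
    # can resume by reloading them instead of refitting every model.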
    if 'single_feature_results.npy' not in os.listdir(args.experiment_dir):
        single_feature_results = {feature: {} for feature in features}
    else:
        single_feature_results = np.load(f'{args.experiment_dir}/single_feature_results.npy', allow_pickle=True).item()
    if 'single_classifier_results.npy' not in os.listdir(args.experiment_dir):
        single_classifier_results = {clf_name: {} for clf_name in classifier_names}
    else:
        single_classifier_results = np.load(f'{args.experiment_dir}/single_classifier_results.npy', allow_pickle=True).item()

    # One-off migration: the feature was renamed from 'embedded' to 'embeddings',
    # so drop stale keys from older checkpoints (a no-op on fresh runs).
    single_feature_results.pop('embedded', None)
    single_feature_results.setdefault('embeddings', {})
    for clf_name in classifier_names:
        single_classifier_results[clf_name].pop('embedded', None)
    if 'predicted_probabilities_val.npy' not in os.listdir(args.experiment_dir):
        predicted_probabilities_val = {clf_name: {feature: None for feature in features} for clf_name in classifier_names}
        predicted_probabilities_test = {clf_name: {feature: None for feature in features} for clf_name in classifier_names}
    else:
        predicted_probabilities_val = np.load(f'{args.experiment_dir}/predicted_probabilities_val.npy', allow_pickle=True).item()
        predicted_probabilities_test = np.load(f'{args.experiment_dir}/predicted_probabilities_test.npy', allow_pickle=True).item()
        # Apply the same 'embedded' -> 'embeddings' migration to the cached
        # probabilities, keeping any predictions already stored under the new key.
        for clf_name in classifier_names:
            predicted_probabilities_val[clf_name].pop('embedded', None)
            predicted_probabilities_test[clf_name].pop('embedded', None)
            predicted_probabilities_val[clf_name].setdefault('embeddings', None)
            predicted_probabilities_test[clf_name].setdefault('embeddings', None)
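    # Experiment 1 ("single feature"): fix one feature set, train every tuned
    # classifier on it, then stack the classifiers' class probabilities with a
    # logistic-regression meta-learner fitted on the validation split.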
    def single_feature_experiment(feature):
        logger.info(f"Training on feature: {feature}")
        # Load data
        train_data = np.load(get_data_path('train', feature)).astype(np.float32)
        val_data = np.load(get_data_path('val', feature)).astype(np.float32)
        test_data = np.load(get_data_path('test', feature)).astype(np.float32)

        probabilities_val = {}
        probabilities_test = {}

        classifier_dict = get_tuned_classifiers(feature, class_weights, test_data.shape[1])

        # Train and evaluate classifiers
        for clf_name, classifier in classifier_dict.items():
            logger.info(f"Training {clf_name} on {feature}")
            # Check if predictions have already been made for this feature-classifier pair
            if predicted_probabilities_val[clf_name][feature] is not None:
                probabilities_val[clf_name] = predicted_probabilities_val[clf_name][feature]
                probabilities_test[clf_name] = predicted_probabilities_test[clf_name][feature]
                logger.info(f"Found predictions for {feature}, skipping...")
                logger.info(f"Balanced Accuracy {clf_name}: {balanced_accuracy_score(y_val, np.argmax(probabilities_val[clf_name], axis=1))} / {balanced_accuracy_score(y_test, np.argmax(probabilities_test[clf_name], axis=1))}")
                # Record the cached test score so resumed runs still fill the results table.
                single_feature_results[feature][clf_name] = balanced_accuracy_score(
                    y_test, np.argmax(probabilities_test[clf_name], axis=1))
                continue
            classifier.fit(train_data, y_train)
            proba_val = classifier.predict_proba(val_data)
            proba_test = classifier.predict_proba(test_data)
            bal_acc_val = balanced_accuracy_score(y_val, np.argmax(proba_val, axis=1))
            bal_acc_test = balanced_accuracy_score(y_test, np.argmax(proba_test, axis=1))
            logger.info(f"Balanced Accuracy {clf_name}: {bal_acc_val} / {bal_acc_test}")
            single_feature_results[feature][clf_name] = bal_acc_test
            probabilities_val[clf_name] = proba_val
            probabilities_test[clf_name] = proba_test
            predicted_probabilities_val[clf_name][feature] = proba_val
            predicted_probabilities_test[clf_name][feature] = proba_test
            # Release the fitted model to save memory
            classifier_dict[clf_name] = 'Done'
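        # Each predict_proba call yields an (n_samples, n_classes) matrix, so the
        # stacked design matrix below has shape (n_val, n_classifiers * n_classes).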
        # Use probabilities as input to the stacking classifier
        X_stack = np.concatenate([probabilities_val[model] for model in probabilities_val], axis=1)
        stacking_pipeline = Pipeline(
            [('scaler', StandardScaler()), ('log_reg', LogisticRegression(C=1, class_weight='balanced'))])
        stacking_pipeline.fit(X_stack, y_val)

        X_test_stack = np.concatenate([probabilities_test[model] for model in probabilities_test], axis=1)
        bal_acc_stack = balanced_accuracy_score(y_test, stacking_pipeline.predict(X_test_stack))
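        # NOTE: pipeline.score reports plain (unbalanced) accuracy on the validation
        # stack, while the test figure is balanced accuracy, so the two numbers in
        # the log line below are not directly comparable.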
logger.info(f"Stacking Performance: {stacking_pipeline.score(X_stack, y_val)}/{bal_acc_stack}")
single_feature_results[feature]['Stacking'] = bal_acc_stack
    for feature in features:
        single_feature_experiment(feature)
        logger.info(f"Finished Experiment for {feature}")
        # Checkpoint after every feature so an interrupted run can resume.
        np.save(f'{args.experiment_dir}/single_feature_results.npy', single_feature_results)
        np.save(f'{args.experiment_dir}/predicted_probabilities_val.npy', predicted_probabilities_val)
        np.save(f'{args.experiment_dir}/predicted_probabilities_test.npy', predicted_probabilities_test)
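    # Experiment 2 ("single classifier"): fix one classifier, train it on every
    # feature set, then stack the per-feature class probabilities with the same
    # logistic-regression meta-learner.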
    def single_classifier_experiment(clf_name):
        logger.info(f"Training on classifier: {clf_name}")

        probabilities_val = {}
        probabilities_test = {}

        for feature in features:
            logger.info(f"Training on {feature}")
            # Check if predictions have already been made for this feature-classifier pair
            if predicted_probabilities_val[clf_name][feature] is not None:
                probabilities_val[feature] = predicted_probabilities_val[clf_name][feature]
                probabilities_test[feature] = predicted_probabilities_test[clf_name][feature]
                logger.info(f"Found predictions for {feature}, skipping...")
                logger.info(f"Balanced Accuracy {feature}: {balanced_accuracy_score(y_val, np.argmax(probabilities_val[feature], axis=1))} / {balanced_accuracy_score(y_test, np.argmax(probabilities_test[feature], axis=1))}")
                single_classifier_results[clf_name][feature] = balanced_accuracy_score(y_test, np.argmax(probabilities_test[feature], axis=1))
                continue
            train_data = np.load(get_data_path('train', feature)).astype(np.float32)
            val_data = np.load(get_data_path('val', feature)).astype(np.float32)
            test_data = np.load(get_data_path('test', feature)).astype(np.float32)

            classifier = get_tuned_classifiers(feature, class_weights, test_data.shape[1])[clf_name]
            classifier.fit(train_data, y_train)
            proba_val = classifier.predict_proba(val_data)
            proba_test = classifier.predict_proba(test_data)
            bal_acc_val = balanced_accuracy_score(y_val, np.argmax(proba_val, axis=1))
            bal_acc_test = balanced_accuracy_score(y_test, np.argmax(proba_test, axis=1))
            logger.info(f"Balanced Accuracy {feature}: {bal_acc_val} / {bal_acc_test}")
            single_classifier_results[clf_name][feature] = bal_acc_test
            probabilities_val[feature] = proba_val
            probabilities_test[feature] = proba_test
            predicted_probabilities_val[clf_name][feature] = proba_val
            predicted_probabilities_test[clf_name][feature] = proba_test
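        # Stack the per-feature probability vectors side by side; the meta-learner
        # is fitted on validation probabilities and evaluated on the test split.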
        # Use probabilities as input to the stacking classifier
        X_stack = np.concatenate([probabilities_val[model] for model in probabilities_val], axis=1)
        stacking_pipeline = Pipeline(
            [('scaler', StandardScaler()), ('log_reg', LogisticRegression(C=1, class_weight='balanced'))])
        stacking_pipeline.fit(X_stack, y_val)

        X_test_stack = np.concatenate([probabilities_test[model] for model in probabilities_test], axis=1)
        bal_acc_stack = balanced_accuracy_score(y_test, stacking_pipeline.predict(X_test_stack))
        logger.info(f"Stacking Performance: {stacking_pipeline.score(X_stack, y_val)}/{bal_acc_stack}")

        single_classifier_results[clf_name]['Stacking'] = bal_acc_stack
    for clf_name in classifier_names:
        single_classifier_experiment(clf_name)
        logger.info(f"Finished Experiment for {clf_name}")
        # Checkpoint after every classifier, mirroring the single-feature loop.
        np.save(f'{args.experiment_dir}/single_classifier_results.npy', single_classifier_results)
        np.save(f'{args.experiment_dir}/predicted_probabilities_val.npy', predicted_probabilities_val)
        np.save(f'{args.experiment_dir}/predicted_probabilities_test.npy', predicted_probabilities_test)

    logger.info("Experiment Finished")