From 4122be21d68c12ab3936e1402d33c657d2f72144 Mon Sep 17 00:00:00 2001
From: Mario Sänger <mario.saenger@student.hu-berlin.de>
Date: Fri, 4 May 2018 23:42:12 +0200
Subject: [PATCH] Extend debugging options

---
 code_mario/clef18_task1.py    | 119 ++++++++++++++++++++++------------
 code_mario/dnn_classifiers.py |   6 +-
 code_mario/keras_extension.py |  31 +++++++--
 3 files changed, 108 insertions(+), 48 deletions(-)
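
Note: the central refactoring in clef18_task1.py widens the classifier
factories from a single `num_classes` argument to a uniform four-argument
signature, so the DNN factories can derive topology and callbacks from the
embedding size, the label set and optional validation data. Padding also
moves to `padding="post"`, and the shared `Embedding` gains `mask_zero=True`
so downstream recurrent layers skip zero-padded timesteps. A minimal sketch
of the new factory contract (names mirror the patch; `make_classifier`
stands in for one entry of `named_classifiers`):

    from sklearn.neighbors import KNeighborsClassifier

    # Every factory is invoked with the same four arguments, even when a
    # particular classifier (such as KNN) ignores most of them.
    make_classifier = lambda label, input_dim, output_dim, val_data: KNeighborsClassifier()

    clf = make_classifier("icd10", 200, 1500, None)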

diff --git a/code_mario/clef18_task1.py b/code_mario/clef18_task1.py
index 480a076..600866b 100644
--- a/code_mario/clef18_task1.py
+++ b/code_mario/clef18_task1.py
@@ -10,7 +10,7 @@ import os
 from argparse import Namespace
 from gensim.models import FastText
 from keras import Input, Model
-from keras.callbacks import ModelCheckpoint
+from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger
 from keras.layers import Bidirectional, Dense, Dot, LSTM, Embedding
 from keras.preprocessing.sequence import pad_sequences
 from keras.preprocessing.text import Tokenizer
@@ -90,8 +90,8 @@ class Clef18Task1V2(LoggingMixin):
                                            config.max_cert_length, config.max_dict_length)
         model.summary(print_fn=self.logger.info)
 
-        cert_inputs = pad_sequences(train_pair_data["Cert_input"].values, maxlen=config.max_cert_length)
-        dict_inputs = pad_sequences(train_pair_data["Dict_input"].values, maxlen=config.max_dict_length)
+        cert_inputs = pad_sequences(train_pair_data["Cert_input"].values, maxlen=config.max_cert_length, padding="post")
+        dict_inputs = pad_sequences(train_pair_data["Dict_input"].values, maxlen=config.max_dict_length, padding="post")
         labels = train_pair_data["Label"].values
 
         self.logger.info("Start training of embedding model")
@@ -177,45 +177,63 @@ class Clef18Task1V2(LoggingMixin):
         ]
 
         named_classifiers = [
-            ("KNN", lambda num_classes: KNeighborsClassifier()),
-            ("KNN-Cos", lambda num_classes: KNeighborsClassifier(metric="cosine")),
-            ("SGD", lambda num_classes: SGDClassifier(verbose=1, random_state=42)),
-            ("DT", lambda num_classes: DecisionTreeClassifier(random_state=42)),
-            ("RF", lambda num_classes: RandomForestClassifier(verbose=1, random_state=42)),
-            ("LinearSVM", lambda num_classes: LinearSVC(max_iter=10000, verbose=1, random_state=42)),
-
-            ("DNN-1-200", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200], False, 0.0, 50, 2,
-                                                                callbacks=[ku.best_model_checkpointing_by_model_name("dnn-1-200")])),
-            ("DNN-1-300", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [300], False, 0.0, 50, 2,
-                                                                callbacks=[ku.best_model_checkpointing_by_model_name("dnn-1-300")])),
-            ("DNN-200-BN-DO", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200], True, 0.5, 50, 2,
-                                                                  callbacks=[ku.best_model_checkpointing_by_model_name("dnn-1-200-bn-do")])),
-            ("DNN-300-BN-DO", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [300], True, 0.5, 50, 2,
-                                                                  callbacks=[ku.best_model_checkpointing_by_model_name("dnn-1-300-bn-do")])),
-
-            ("DNN-200-100", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200, 100], False, 0.0, 50, 2,
-                                                                  callbacks=[ku.best_model_checkpointing_by_model_name("dnn-200-100")])),
-            ("DNN-200-200", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200, 200], False, 0.0, 50, 2,
-                                                                  callbacks=[ku.best_model_checkpointing_by_model_name("dnn-200-200")])),
-            ("DNN-200-100-BN-DO", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200, 100], True, 0.5, 50, 2,
-                                                                        callbacks=[ku.best_model_checkpointing_by_model_name("dnn-200-100-bn-do")])),
-            ("DNN-200-200-BN-DO", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200, 200], True, 0.5, 50, 2,
-                                                                        callbacks=[ku.best_model_checkpointing_by_model_name("dnn-200-200-bn-do")])),
-
-            # ("Test-DNN-200-BN-DO", lambda num_classes: nnc.dense_network(cert_rnn.output.shape[1].value, num_classes, [200, 200], True, 0.5, 1, 4,
-            #                                                               callbacks=[ku.best_model_checkpointing_by_model_name("test-dnn-200-bn-do")])),
-
-            ('DU1', lambda num_classes: DummyClassifier(strategy="stratified")),
-            ('DU2', lambda num_classes: DummyClassifier(strategy="most_frequent"))
+            ("KNN", lambda label, input_dim, output_dim, val_data: KNeighborsClassifier()),
+            ("KNN-Cos", lambda label, input_dim, output_dim, val_data: KNeighborsClassifier(metric="cosine")),
+            ("SGD", lambda label, input_dim, output_dim, val_data: SGDClassifier(verbose=1, random_state=42)),
+            ("DT", lambda label, input_dim, output_dim, val_data: DecisionTreeClassifier(random_state=42)),
+            ("RF", lambda label, input_dim, output_dim, val_data: RandomForestClassifier(verbose=1, random_state=42)),
+            ("LinearSVM", lambda label, input_dim, output_dim, val_data: LinearSVC(max_iter=10000, verbose=1, random_state=42)),
+
+            ("DNN-200", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200],
+                                            batch_normalization=False, dropout_rate=0.0, epochs=10, batch_size=2)),
+            ("DNN-300", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-300", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[300],
+                                            batch_normalization=False, dropout_rate=0.0, epochs=50, batch_size=2)),
+
+            ("DNN-200-BN-DO", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200-bn-do", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200],
+                                            batch_normalization=True, dropout_rate=0.5, epochs=50, batch_size=2)),
+            ("DNN-300-BN-DO", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-300-bn-do", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[300],
+                                            batch_normalization=True, dropout_rate=0.5, epochs=50, batch_size=2)),
+
+            ("DNN-200-100", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200-100", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200, 100],
+                                            batch_normalization=False, dropout_rate=0.0, epochs=50, batch_size=2)),
+            ("DNN-200-200", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200-200", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200, 200],
+                                            batch_normalization=False, dropout_rate=0.0, epochs=50, batch_size=2)),
+            ("DNN-300-200", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-300-200", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[300, 200],
+                                            batch_normalization=False, dropout_rate=0.0, epochs=50, batch_size=2)),
+
+            ("DNN-200-100-BN-DO", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200-100-bn-do", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200, 100],
+                                            batch_normalization=True, dropout_rate=0.5, epochs=50, batch_size=2)),
+            ("DNN-200-200-BN-DO", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-200-200-bn-do", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[200, 200],
+                                            batch_normalization=True, dropout_rate=0.5, epochs=50, batch_size=2)),
+            ("DNN-300-200-BN-DO", lambda label, input_dim, output_dim, val_data:
+                self.create_dnn_classifier("dnn-300-200-bn-do", label, val_data, input_dim=input_dim, output_dim=output_dim, hidden_layer_sizes=[300, 200],
+                                            batch_normalization=True, dropout_rate=0.5, epochs=50, batch_size=2)),
+
+            ("DU1", lambda label, input_dim, output_dim, val_data: DummyClassifier(strategy="stratified")),
+            ("DU2", lambda label, input_dim, output_dim, val_data: DummyClassifier(strategy="most_frequent"))
         ]
 
         num_experiments = len(target_label_configs) * len(test_sets) * len(named_classifiers)
         cur_experiment = 1
 
+        input_dim = cert_rnn.output.shape[1].value
+
+        models_dir = os.path.join(AppContext.default().output_dir, "models")
+        os.makedirs(models_dir, exist_ok=True)
+
         results = []
         for target_label, target_column, label_encoder in target_label_configs:
             self.logger.info("Start evaluation experiments with label %s", target_label)
-            num_classes = len(label_encoder.classes_)
+            output_dim = len(label_encoder.classes_)
 
             complete_train_data = np.append(dict_embeddings, train_cert_embeddings, axis=0)
             complete_train_labels = np.append(config.dict_df[target_column].values, config.train_cert_df[target_column].values, axis=0)
@@ -223,11 +241,11 @@ class Clef18Task1V2(LoggingMixin):
 
             for cl_name, classifier_factory in named_classifiers:
                 self.logger.info("Start training of classifier %s", cl_name)
-                classifier = classifier_factory(num_classes)
+                classifier = classifier_factory(target_label, input_dim, output_dim, val_cert_embeddings)
                 classifier.fit(complete_train_data, complete_train_labels)
 
                 classifier_file_name = "cl_{}_{}.model".format(cl_name, target_label).lower()
-                classifier_file = os.path.join(AppContext.default().output_dir, classifier_file_name)
+                classifier_file = os.path.join(models_dir, classifier_file_name)
                 try:
                     joblib.dump(classifier, classifier_file)
                 except:
@@ -292,7 +310,7 @@ class Clef18Task1V2(LoggingMixin):
             except KeyError:
                 self.logger.error("Can't create embedding for '%s'", word)
 
-        embedding = Embedding(len(word_index)+1, ft_model.vector_size, weights=[embedding_matrix])
+        embedding = Embedding(len(word_index)+1, ft_model.vector_size, weights=[embedding_matrix], mask_zero=True)
 
         # Model 1: Learn a representation of a line originating from a death certificate
         input_certificate_line = Input((max_cert_length, ))
@@ -530,6 +548,22 @@ class Clef18Task1V2(LoggingMixin):
         return k.models.load_model(args.emb_model)
 
 
+    def create_dnn_classifier(self, model_name: str, label: str, val_data: Tuple, **kwargs):
+        if val_data is not None:
+            monitor_loss = "val_loss"
+        else:
+            monitor_loss = "loss"
+
+        callbacks = [
+            ku.best_model_checkpointing_by_model_name(model_name, monitor_loss),
+            ku.csv_logging_callback(model_name, label),
+            ku.early_stopping(monitor_loss, 5)
+        ]
+
+        kwargs["callbacks"] = callbacks
+        return nnc.dense_network(**kwargs)
+
+
 class NegativeSampling(LoggingMixin):
 
     def __init__(self):
@@ -548,7 +582,10 @@ class NegativeSampling(LoggingMixin):
     def default_strategy(self, num_negative_samples: int) -> Callable:
         def _sample(dictionary_df: DataFrame, line_icd10_code: str):
             negative_samples = dictionary_df.query("ICD10 != '%s'" % line_icd10_code)
-            negative_samples = negative_samples.sample(num_negative_samples)
+
+            # The guard is only needed during development and test runs with very few examples
+            if len(negative_samples) > 0:
+                negative_samples = negative_samples.sample(min(num_negative_samples, len(negative_samples)))
 
             return negative_samples
 
@@ -627,15 +664,17 @@ if __name__ == "__main__":
 
     clef_data = Clef18Task1Data()
     dictionary = clef_data.read_dictionary_by_language(args.lang)
+    #dictionary = dictionary.sample(1200)
+
     certificates = clef_data.read_train_certifcates_by_language(args.lang)
     certificates = clef_data.filter_single_code_lines(certificates)
     certificates = clef_data.add_masked_icd10_column(certificates, 10)
 
     sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
-    ft_model = FastText(sentences, min_count=1)
+    #ft_model = FastText(sentences, min_count=1)
 
     ft_embeddings = FastTextEmbeddings()
-    #ft_model = ft_embeddings.load_embeddings_by_language(args.lang)
+    ft_model = ft_embeddings.load_embeddings_by_language(args.lang)
 
     clef18_task1 = Clef18Task1V2()
     neg_sampling = NegativeSampling()
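
Note: the clamp added to NegativeSampling.default_strategy above avoids a
pandas ValueError when fewer candidate rows remain than requested samples.
A self-contained illustration (data values are made up):

    import pandas as pd

    dictionary_df = pd.DataFrame({"ICD10": ["A01", "A02", "B20"],
                                  "Text": ["a", "b", "c"]})
    num_negative_samples = 10

    negative_samples = dictionary_df.query("ICD10 != 'A01'")
    # Without the min(...) clamp, .sample(10) would raise a ValueError,
    # since only two candidate rows remain.
    if len(negative_samples) > 0:
        negative_samples = negative_samples.sample(
            min(num_negative_samples, len(negative_samples)))
    print(negative_samples)
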
diff --git a/code_mario/dnn_classifiers.py b/code_mario/dnn_classifiers.py
index e4a4b25..b19efb6 100644
--- a/code_mario/dnn_classifiers.py
+++ b/code_mario/dnn_classifiers.py
@@ -12,14 +12,14 @@ from keras_extension import ExtendedKerasClassifier
 class NeuralNetworkClassifiers(object):
 
     @staticmethod
-    def dense_network(input_size: int, target_classes: int, hidden_layer_sizes: List[int], batch_normalization: bool,
+    def dense_network(input_dim: int, output_dim: int, hidden_layer_sizes: List[int], batch_normalization: bool,
                       dropout_rate: float, epochs: int, batch_size: int, callbacks: List = None):
         def _build_model():
             model = Sequential()
 
             for i, layer_size in enumerate(hidden_layer_sizes):
                 if i == 0:
-                    model.add(Dense(layer_size, input_shape=(input_size,), kernel_initializer=VarianceScaling(), activation="selu"))
+                    model.add(Dense(layer_size, input_shape=(input_dim,), kernel_initializer=VarianceScaling(), activation="selu"))
                 else:
                     model.add(Dense(layer_size, kernel_initializer=VarianceScaling(), activation="selu"))
 
@@ -29,7 +29,7 @@ class NeuralNetworkClassifiers(object):
                 if dropout_rate and dropout_rate > 0.0:
                     model.add(Dropout(dropout_rate))
 
-            model.add(Dense(target_classes, activation="softmax"))
+            model.add(Dense(output_dim, activation="softmax"))
             model.compile(optimizer=Adam(), loss="sparse_categorical_crossentropy", metrics=['accuracy'])
 
             return model
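
Note: with the parameters renamed to input_dim/output_dim, the builder
assembles a stack like the following standalone sketch (assuming
input_dim=200, output_dim=10, hidden_layer_sizes=[200, 100], with batch
normalization and dropout enabled):

    from keras.models import Sequential
    from keras.layers import BatchNormalization, Dense, Dropout
    from keras.initializers import VarianceScaling
    from keras.optimizers import Adam

    model = Sequential()
    # Only the first hidden layer carries the input shape.
    model.add(Dense(200, input_shape=(200,),
                    kernel_initializer=VarianceScaling(), activation="selu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(100, kernel_initializer=VarianceScaling(), activation="selu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # Softmax over the output_dim target classes.
    model.add(Dense(10, activation="softmax"))
    model.compile(optimizer=Adam(), loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
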
diff --git a/code_mario/keras_extension.py b/code_mario/keras_extension.py
index 33d4116..732bbfc 100644
--- a/code_mario/keras_extension.py
+++ b/code_mario/keras_extension.py
@@ -2,7 +2,7 @@ import os
 import keras as k
 
 from logging import Logger
-from keras.callbacks import Callback, ModelCheckpoint
+from keras.callbacks import Callback, ModelCheckpoint, CSVLogger, EarlyStopping
 from keras.wrappers.scikit_learn import KerasClassifier
 
 from app_context import AppContext
@@ -13,13 +13,33 @@ class KerasUtil(object):
 
     @staticmethod
     def best_model_checkpointing_by_model_name(model_name: str, monitor_loss: str = "loss"):
-        best_model_file = os.path.join(AppContext.default().output_dir, "%s_best.h5" % model_name)
+        models_dir = os.path.join(AppContext.default().output_dir, "models")
+        os.makedirs(models_dir, exist_ok=True)
+
+        best_model_file = os.path.join(models_dir, "%s_best.h5" % model_name)
         return ModelCheckpoint(filepath=best_model_file, monitor=monitor_loss, save_best_only=True, verbose=1)
 
     @staticmethod
     def best_model_checkpointing_by_file_path(best_model_file: str, monitor_loss: str = "loss"):
         return ModelCheckpoint(filepath=best_model_file, monitor=monitor_loss, save_best_only=True, verbose=1)
 
+    @staticmethod
+    def early_stopping(monitor_loss: str, patience: int):
+        return EarlyStopping(monitor_loss, patience=patience, verbose=1)
+
+    @staticmethod
+    def csv_logging_callback(model_name: str, label: str):
+        train_log_dir = os.path.join(AppContext.default().log_dir, "train_logs")
+        try:
+            os.makedirs(train_log_dir, exist_ok=True)
+        except OSError:
+            print("Can't create train log directory: " + train_log_dir)
+
+        log_file_name = "%s_%s.log" % (model_name, label)
+        training_log_file = os.path.join(train_log_dir, log_file_name)
+
+        return CSVLogger(training_log_file, separator=";", append=True)
+
 
 class LoggerCallback(Callback):
 
@@ -53,7 +73,7 @@ class ExtendedKerasClassifier(KerasClassifier, LoggingMixin):
                 checkpoint_callbacks = [callback for callback in self.sk_params["callbacks"]
                                         if isinstance(callback, ModelCheckpoint) and callback.save_best_only]
                 if checkpoint_callbacks:
-                    self.logger.info("Reloading model from %s", checkpoint_callbacks[0].filepath)
+                    self.logger.debug("Reloading model from %s", checkpoint_callbacks[0].filepath)
                     self.model = k.models.load_model(checkpoint_callbacks[0].filepath)
                     self.re_fitted = False
                 else:
@@ -61,9 +81,10 @@ class ExtendedKerasClassifier(KerasClassifier, LoggingMixin):
             else:
                 self.logger.debug("Can't find callbacks parameter. No callbacks configured?")
         else:
-            self.logger.debug("Model wasn't re-fitted -> re-using existing model")
+            #self.logger.debug("Model wasn't re-fitted -> re-using existing model")
             pass
-        self.logger.info("Classifer has %s classes", len(self.classes_))
+
+        #self.logger.debug("Classifer has %s classes", len(self.classes_))
         return super(ExtendedKerasClassifier, self).predict(x, **kwargs)
 
     def __getstate__(self):
-- 
GitLab
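
Note: create_dnn_classifier wires the three KerasUtil helpers together.
A minimal sketch of the resulting callback setup ("output" stands in for
AppContext.default().output_dir; "dnn-200" and "icd10" are example model
and label names). Keras only provides "val_loss" when validation data is
actually passed to fit, so the sketch monitors plain "loss":

    import os
    from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping

    output_dir = "output"
    models_dir = os.path.join(output_dir, "models")
    train_log_dir = os.path.join(output_dir, "train_logs")
    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(train_log_dir, exist_ok=True)

    callbacks = [
        # Keep only the best model seen so far, judged by training loss.
        ModelCheckpoint(filepath=os.path.join(models_dir, "dnn-200_best.h5"),
                        monitor="loss", save_best_only=True, verbose=1),
        # Append per-epoch metrics to a semicolon-separated log file.
        CSVLogger(os.path.join(train_log_dir, "dnn-200_icd10.log"),
                  separator=";", append=True),
        # Stop once the monitored loss fails to improve for 5 epochs.
        EarlyStopping(monitor="loss", patience=5, verbose=1),
    ]
    # model.fit(x, y, epochs=50, batch_size=2, callbacks=callbacks)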