diff --git a/src/odroid/catkin_ws/src/car/scripts/saliency.py b/src/odroid/catkin_ws/src/car/scripts/saliency.py
index 4d9b7c7a05613f230ea80d0ae13b35674e699e6e..6765a65de31cb20f59412ad90579709cf5a353f5 100644
--- a/src/odroid/catkin_ws/src/car/scripts/saliency.py
+++ b/src/odroid/catkin_ws/src/car/scripts/saliency.py
@@ -1,7 +1,7 @@
 import sys
 sys.path.insert(0, "./deepviz")
 
-from vis.visualization import visualize_saliency, visualize_cam
+from vis.visualization import visualize_saliency, visualize_cam, overlay
 from keras import activations
 from utils import *
 from vis.utils import utils
@@ -54,32 +54,35 @@ from keras.models import Model
 # ax2.imshow(mask)
 
 images = [
-    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-819.jpg"), # left, train
-    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-625.jpg"), # straight, train
-    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-9640.jpg"), # right, train
-    load_image("I:/recordings/data-2018-09-07-15-59-32", "img-700.jpg"), # left, valid
+    load_image("I:/recordings/data-2018-08-31-12-12-14", "img-300.jpg"), # left, ideal line
+    load_image("I:/recordings/data-2018-08-31-12-12-14", "img-500.jpg"), # straight, ideal line
+    load_image("I:/recordings/data-2018-08-31-12-12-14", "img-6175.jpg"), # right, ideal line
+
+    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-819.jpg"), # left, center line
+    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-625.jpg"), # straight, center line
+    load_image("I:/recordings/data-2018-08-31-10-42-27", "img-9640.jpg"), # right, center line
 ]
 
 models = [
-    load_model("G:/car/models/model-001.h5"),
-    load_model("G:/car/models/model-005.h5"),
     load_model("G:/car/models/model-010.h5"),
+    load_model("G:/car/models/model-050.h5"),
+    load_model("G:/car/models/model-100.h5"),
 ]
 
-fig, axes = plt.subplots(len(images) * 2, len(models))
+fig, axes = plt.subplots(len(images), len(models))
 
 for j, model in enumerate(models):
-    layer_idx = -1
+    layer_idx = 6  # layer whose activation is linearized and visualized
     model.layers[layer_idx].activation = activations.linear
     model = utils.apply_modifications(model)
     for i, image in enumerate(images):
         image = preprocess_image(image)
-        grads = visualize_cam(model, layer_idx, filter_indices=None, seed_input=image, grad_modifier=None)
-        axes[(2 * i) + 1, j].imshow(grads)
+        grads = visualize_saliency(model, layer_idx, filter_indices=None, seed_input=image, backprop_modifier="guided")
 
         image = (image + 1.0) * 127.5
         image = image.astype(np.uint8)
         image = cv2.cvtColor(image, cv2.COLOR_YUV2RGB)
-        axes[2 * i, j].imshow(image)
+
+        axes[i, j].imshow(overlay(image, grads, alpha=0.3))  # blend saliency onto the frame
 
 plt.show()
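
Note on saliency.py: the Grad-CAM row per image is replaced by a guided-backprop saliency map blended directly onto the camera frame, so the figure collapses from two rows per image to one. keras-vis's overlay helper amounts to an alpha blend of two same-shaped arrays; since visualize_saliency normally returns a 2-D map, something along these lines is presumably happening under the hood (a sketch, not the deepviz implementation; the jet colormap step is an assumption):

    import numpy as np
    import matplotlib.cm as cm

    def blend_saliency(frame, grads, alpha=0.3):
        # colormap the 2-D saliency map to 3-channel RGB; assumes grads is float in [0, 1]
        heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
        # weighted sum: alpha weighs the heatmap, (1 - alpha) the camera frame
        return np.uint8(alpha * heatmap + (1.0 - alpha) * frame)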
diff --git a/src/odroid/catkin_ws/src/car/scripts/train.py b/src/odroid/catkin_ws/src/car/scripts/train.py
index 20b3ba9319fa9ce46160578b5622e75bde4ea178..8592f67f74481e5336733dc97eedec228f1f2925 100644
--- a/src/odroid/catkin_ws/src/car/scripts/train.py
+++ b/src/odroid/catkin_ws/src/car/scripts/train.py
@@ -63,12 +63,10 @@ def build_model_nvidia(args):
     x = Dense(10, activation="elu")(x)
 
     angle_out = Dense(1, name="angle_out", activation="linear")(x)
-    throttle_out = Dense(1, name="throttle_out", activation="linear")(x)
 
-    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
+    model = Model(inputs=[img_in], outputs=[angle_out])
     model.compile(optimizer="adam",
-                  loss={"angle_out": "mean_squared_error",
-                        "throttle_out": "mean_squared_error"})
+                  loss={"angle_out": "mean_squared_error"})
     return model
 
 
@@ -113,12 +111,10 @@ def build_model_custom(args):
     x = Dense(50, activation="relu")(x)
 
     angle_out = Dense(1, name="angle_out", activation="linear")(x)
-    throttle_out = Dense(1, name="throttle_out", activation="linear")(x)
 
-    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
+    model = Model(inputs=[img_in], outputs=[angle_out])
     model.compile(optimizer="adam",
-                    loss={"angle_out": "mean_squared_error",
-                    "throttle_out": "mean_squared_error"})
+                  loss={"angle_out": "mean_squared_error"})
     return model
 
 
@@ -194,7 +190,7 @@ def main():
     parser.add_argument('-d', help='data directory',        dest='data_dir',          type=str,   default=default_training_dir)
     parser.add_argument('-t', help='test size fraction',    dest='test_size',         type=float, default=0.2)
     parser.add_argument('-k', help='dropout probability',   dest='keep_prob',         type=float, default=0.5)
-    parser.add_argument('-n', help='number of epochs',      dest='nb_epoch',          type=int,   default=100)
+    parser.add_argument('-n', help='number of epochs',      dest='nb_epoch',          type=int,   default=10)
     parser.add_argument('-b', help='batch size',            dest='batch_size',        type=int,   default=40)
     parser.add_argument('-o', help='save best models only', dest='save_best_only',    type=s2b,   default='false')
     parser.add_argument('-l', help='learning rate',         dest='learning_rate',     type=float, default=3.0e-4)
@@ -215,7 +211,7 @@ def main():
     #load data
     data = load_data(args)
     #build model
-    model = build_model_custom(args)
+    model = build_model_nvidia(args)
     # train the model on the data; it saves the result as model.h5
     train_model(model, args, *data)
 
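
Note on train.py: dropping the throttle head turns both builders into single-output models, so the loss dict shrinks to one entry (with a single output, passing the loss as a plain string is equivalent). A minimal sketch of the resulting head, with a Flatten standing in for the elided conv stack and the 66x200x3 input shape assumed from the NVIDIA architecture:

    import numpy as np
    from keras.layers import Dense, Flatten, Input
    from keras.models import Model

    img_in = Input(shape=(66, 200, 3), name="img_in")   # assumed input size
    x = Flatten()(img_in)                               # stand-in for the conv layers
    x = Dense(10, activation="elu")(x)
    angle_out = Dense(1, name="angle_out", activation="linear")(x)

    model = Model(inputs=[img_in], outputs=[angle_out])
    model.compile(optimizer="adam", loss="mean_squared_error")  # same as the dict form
    model.predict(np.zeros((1, 66, 200, 3)))            # smoke test: one angle per frame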
diff --git a/src/odroid/catkin_ws/src/car/scripts/utils.py b/src/odroid/catkin_ws/src/car/scripts/utils.py
index 08510829897b7aa426f3f377a93baead9ed9274d..b8bee47a8fc19d2708d959f27bbbc73422551cc8 100644
--- a/src/odroid/catkin_ws/src/car/scripts/utils.py
+++ b/src/odroid/catkin_ws/src/car/scripts/utils.py
@@ -53,7 +53,7 @@ def resize(image):
 
 
 def normalize(image):
-    return ne.evaluate("image / 127.5 - 1.0")
+    return ne.evaluate("(image / 127.5) - 1.0")  # maps [0, 255] to [-1.0, 1.0]
 
 
 def convert(image):
@@ -185,12 +185,12 @@ class BatchSequence(keras.utils.Sequence):
 
     def __getitem__(self, idx):
         inputs = np.empty([self.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])
-        outputs = [np.empty(self.batch_size), np.empty(self.batch_size)]
+        outputs = [np.empty(self.batch_size)]  # single output array: steering angle
 
         for batch_idx in range(self.batch_size):
             seq_idx = (idx * self.batch_size + batch_idx) % len(self.sequence)
             sample_idx = self.sequence[seq_idx]
-            angle, throttle = self.y[sample_idx]
+            angle, _ = self.y[sample_idx]  # throttle stays in y but is discarded
             image_idx = self.X[sample_idx]
             image = self.image_cache.get_image(image_idx)
             # augumentation
@@ -198,7 +198,6 @@ class BatchSequence(keras.utils.Sequence):
                 image, angle = augument(image, angle)
             inputs[batch_idx] = normalize(convert(image))
             outputs[0][batch_idx] = preprocess_angle(angle)
-            outputs[1][batch_idx] = preprocess_throttle(throttle)
 
         return inputs, outputs
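
Note on utils.py: the normalize() change only adds parentheses; numexpr follows the usual operator precedence, so behavior is identical. The BatchSequence change is the real counterpart to the single-output model: y still stores (angle, throttle) pairs, but the throttle is discarded at unpack time, which leaves load_data untouched. The contract of the updated __getitem__ is then (an illustrative check, not part of the patch):

    def check_batch(batch_sequence):
        # a batch is now (inputs, [angles]) rather than (inputs, [angles, throttles])
        inputs, outputs = batch_sequence[0]
        assert inputs.shape[0] == batch_sequence.batch_size
        assert len(outputs) == 1                          # single head: steering angle
        assert outputs[0].shape == (batch_sequence.batch_size,)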