diff --git a/paper/40_experiments.tex b/paper/40_experiments.tex
index 0793bcfb1f42268fcf457dedd607cf52fe2e844e..bc214fa57daeb991616d91f6cbd8e16834bf4cdd 100644
--- a/paper/40_experiments.tex
+++ b/paper/40_experiments.tex
@@ -39,7 +39,7 @@ In contrast using the balanced data set the model reaches an accuracy of 0.899 (
 \begin{table}[]
 \label{tab:s2s}
 \centering
-\begin{tabularx}{0.9\textwidth}{p{3cm}|c|c|c|c|c}
+\begin{tabularx}{0.97\textwidth}{l|C{2.7cm}|C{1.75cm}|C{1.25cm}|C{1.75cm}|C{1.25cm}}
 \toprule
 \multirow{2}{*}{\textbf{Setting}} & \multirow{2}{*}{\textbf{Trained Epochs}}&\multicolumn{2}{c|}{\textbf{Train}}&\multicolumn{2}{c}{\textbf{Validation}} \\ 
 \cline{3-6}
@@ -80,7 +80,7 @@ In contrast, using the extended data set the model reaches an accuracy of 0.954
 \begin{table}[]
 \label{tab:icd10Classification}
 \centering
-\begin{tabularx}{0.9\textwidth}{p{2.25cm}|c|c|c|c|c} 
+\begin{tabularx}{0.985\textwidth}{p{2.75cm}|C{2.6cm}|C{1.75cm}|C{1.25cm}|C{1.75cm}|C{1.25cm}} 
 \toprule
 %\multirow{2}{*}{\textbf{Tokenization}}&\multirow{2}{*}{\textbf{Model}}&\multirow{2}{*}{\textbf{Trained Epochs}}&\multicolumn{2}{c|}{\textbf{Train}}&\multicolumn{2}{c}{\textbf{Validation}} \\
 %\cline{4-7} 
@@ -110,7 +110,7 @@ The scores are calculated using a prevalence-weighted macro-average across the o
 
 \begin{table}[t!]
 \centering
-\begin{tabular}{l|c|c|c}
+\begin{tabular}{L{3cm}|C{2cm}|C{2cm}|C{2cm}}
 \toprule
 \textbf{Model} &  \textbf{Precision} & \textbf{Recall} & \textbf{F-score} \\
 \hline
@@ -148,9 +148,9 @@ Worst results were obtained on the middle, French, corpus while the biggest corp
 
 \begin{table}[]
 \centering
-\begin{tabularx}{0.8\textwidth}{p{2cm}|p{3cm}|c|c|c}
+\begin{tabularx}{0.95\textwidth}{L{2cm}|L{3cm}|C{2cm}|C{2cm}|C{2cm}}
 \toprule
-\textbf{Language} & \textbf{Model} & \textbf{Precision} & \textbf{Recall} & \textbf{F-score}\\
+\textbf{Language} & \multicolumn{1}{c|}{\textbf{Model}} & \textbf{Precision} & \textbf{Recall} & \textbf{F-score}\\
 \hline
 \multirow{2}{*}{French}
 & Final-Balanced & 0.494 & 0.246 & 0.329 \\
@@ -176,7 +176,7 @@ Worst results were obtained on the middle, French, corpus while the biggest corp
 \cline{2-5}
-& Baseline      & 0,165 & 0.172 & 0.169 \\
+& Baseline      & 0.165 & 0.172 & 0.169 \\
 & Average       & 0.844 & 0.760 & 0.799 \\
-& Median        & 0,900 & 0.824 & 0.863 \\
+& Median        & 0.900 & 0.824 & 0.863 \\
 \bottomrule
 \end{tabularx}
 \caption{Test results of the final pipeline. Final-Balanced = DCEM-Balanced + ICD-10\_Extended. Final-Full = DCEM-Full + ICD-10\_Extended}
diff --git a/paper/references.bib b/paper/references.bib
index 1dbd54967535b49075ca8313ca710284a47e0683..b60d956e95eaf14e6e378d4ddd7cb7c1ed00022b 100644
--- a/paper/references.bib
+++ b/paper/references.bib
@@ -269,7 +269,8 @@ The system proposed in this study provides automatic identification and characte
 	booktitle = {{CLEF} 2018 {Evaluation} {Labs} and {Workshop}: {Online} {Working} {Notes}},
 	publisher = {CEUR-WS},
 	author = {Névéol, Aurélie and Robert, Aude and Grippo, F and Morgand, C and Orsi, C and Pelikán, L and Ramadier, Lionel and Rey, Grégoire and Zweigenbaum, Pierre},
-	year = {2018}
+	year = {2018},
+	month = {September}
 }
 
 @inproceedings{cho_learning_2014,
@@ -279,7 +280,7 @@ The system proposed in this study provides automatic identification and characte
 	booktitle = {Proceedings of the 2014 {Conference} on {Empirical} {Methods} in {Natural} {Language} {Processing} ({EMNLP})},
 	publisher = {Association for Computational Linguistics},
 	author = {Cho, Kyunghyun and van Merrienboer, Bart and Gulcehre, Caglar and Bahdanau, Dzmitry and Bougares, Fethi and Schwenk, Holger and Bengio, Yoshua},
-	month = oct,
+	month = {October},
 	year = {2014},
 	pages = {1724--1734},
 	file = {Full Text PDF:/Users/mario/Zotero/storage/4NE9THT8/Cho et al. - 2014 - Learning Phrase Representations using RNN Encoder–.pdf:application/pdf}
@@ -293,7 +294,7 @@ The system proposed in this study provides automatic identification and characte
 	urldate = {2018-05-23},
 	journal = {CEUR workshop proceedings},
 	author = {Névéol, Aurélie and Cohen, K. Bretonnel and Grouin, Cyril and Hamon, Thierry and Lavergne, Thomas and Kelly, Liadh and Goeuriot, Lorraine and Rey, Grégoire and Robert, Aude and Tannier, Xavier and Zweigenbaum, Pierre},
-	month = sep,
+	month = {September},
 	year = {2016},
 	pmid = {29308065},
 	pmcid = {PMC5756095},
diff --git a/paper/wbi-eclef18.tex b/paper/wbi-eclef18.tex
index 04489516205346bc51813d430512102222eab12c..2609470a43504ac55e6be78406769480a3ae2afd 100644
--- a/paper/wbi-eclef18.tex
+++ b/paper/wbi-eclef18.tex
@@ -3,6 +3,7 @@
 % Version 2.20 of 2017/10/04
 %
 \documentclass[runningheads]{llncs}
+\pagestyle{empty}
 \usepackage[utf8]{inputenc} 
 \usepackage[english]{babel} 
 \usepackage{color}
@@ -10,6 +11,12 @@
 \usepackage{booktabs}
 \usepackage[hyphens]{url}
 \usepackage{hyperref}
+\usepackage{array}
+
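+% Fixed-width column types for the result tables: L{w} is a ragged-right
+% column and C{w} a centered one; both are m-columns with \arraybackslash so \\ still ends a row.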
+\newcolumntype{L}[1]{>{\raggedright\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
+\newcolumntype{C}[1]{>{\centering\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
 
  
 % Used for displaying a sample figure. If possible, figure files should