Commit 3ca06d2d authored by Mario Sänger

Preparation of submission

parent 73a78c24
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{suominen_overview_2018}
\citation{neveol_clef_2018}
\select@language{english}
\@writefile{toc}{\select@language{english}}
\@writefile{lof}{\select@language{english}}
\@writefile{lot}{\select@language{english}}
\@writefile{toc}{\contentsline {title}{WBI at CLEF eHealth 2018 Task 1: Language-independent ICD-10 coding using multi-lingual embeddings and recurrent neural networks}{1}{chapter.1}}
\@writefile{toc}{\authcount {3}}
\@writefile{toc}{\contentsline {author}{Jurica \v {S}eva \and Mario S\IeC {\"a}nger \and Ulf Leser}{1}{chapter.1}}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1.1}}
\citation{cho_learning_2014,lample_neural_2016,dyer_transition-based_2015}
\citation{miftakhutdinov_kfu_2017}
\citation{bahdanau_neural_2018,cho_learning_2014}
\citation{bengio_scheduled_2015}
\citation{lample_neural_2016,wei_disease_2016}
\citation{dyer_transition-based_2015}
\citation{wang_part--speech_2015}
\@writefile{toc}{\contentsline {section}{\numberline {2}Related work}{2}{section.1.2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Recurrent neural networks (RNN)}{2}{subsection.1.2.1}}
\citation{hochreiter_gradient_2001,bengio_learning_1994}
\citation{hochreiter_long_1997}
\citation{cho_learning_2014}
\citation{turney_frequency_2010}
\citation{collobert_natural_2011}
\citation{bojanowski_enriching_2017,mikolov_distributed_2013,pennington_glove_2014,peters_deep_2018}
\citation{peters_semi-supervised_2017,peters_deep_2018,pinter_mimicking_2017}
\citation{mikolov_efficient_2013,mikolov_distributed_2013}
\citation{pennington_glove_2014}
\citation{peters_deep_2018}
\citation{pinter_mimicking_2017}
\citation{bojanowski_enriching_2017}
\citation{faruqui_improving_2014,xing_normalized_2015}
\citation{guo_cross-lingual_2015,vyas_sparse_2016}
\citation{pham_learning_2015}
\citation{sogaard_inverted_2015}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Word Embeddings}{3}{subsection.1.2.2}}
\citation{neveol_clinical_2016}
\citation{neveol_clef_2017}
\citation{cabot_sibm_2016,jonnagaddala_automatic_2017,van_mulligen_erasmus_2016}
\citation{dermouche_ecstra-inserm_2016,ebersbach_fusion_2017,ho-dac_litl_2016,miftakhutdinov_kfu_2017}
\citation{di_nunzio_lexicon_2017}
\citation{ho-dac_litl_2017}
\citation{ho-dac_litl_2016}
\citation{dermouche_ecstra-inserm_2016}
\citation{ebersbach_fusion_2017}
\citation{miftakhutdinov_kfu_2017}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}ICD-10 Classification}{4}{subsection.1.2.3}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Methods}{4}{section.1.3}}
\newlabel{sec:methods}{{3}{4}{Methods}{section.1.3}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Death Cause Extraction Model}{4}{subsection.1.3.1}}
\citation{sutskever_sequence_2014}
\citation{bojanowski_enriching_2017}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Illustration of the encoder-decoder model for death cause extraction. The encoder processes a death certificate line token-wise from left to right. The final state of the encoder forms a semantic representation of the line and serves as the initial input for the decoding process. The decoder is trained to predict the death cause text from the provided ICD-10 dictionaries word by word (using the special tags \textbackslash s and \textbackslash e to mark the start and end of a sequence). All input tokens are represented by the concatenation of the fastText embeddings of all three languages.}}{5}{figure.1.1}}
\newlabel{fig:encoder_decoder}{{1}{5}{Illustration of the encoder-decoder model for death cause extraction. The encoder processes a death certificate line token-wise from left to right. The final state of the encoder forms a semantic representation of the line and serves as the initial input for the decoding process. The decoder is trained to predict the death cause text from the provided ICD-10 dictionaries word by word (using the special tags \textbackslash s and \textbackslash e to mark the start and end of a sequence). All input tokens are represented by the concatenation of the fastText embeddings of all three languages}{figure.1.1}{}}
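The caption above describes a standard sequence-to-sequence setup. The following is a minimal sketch of such an encoder-decoder, assuming a Keras implementation; vocabulary sizes, state sizes and variable names are illustrative assumptions and are not taken from the submission (the 900-dimensional input assumes three concatenated 300-dimensional fastText vectors).

# Minimal encoder-decoder sketch (assumed Keras implementation, illustrative sizes only).
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.models import Model

VOCAB_IN, VOCAB_OUT = 20000, 15000     # assumed vocabulary sizes
EMB_DIM, STATE_DIM = 3 * 300, 256      # 3 concatenated 300-d fastText vectors; assumed LSTM size

# Encoder: reads a certificate line token by token; its final state summarizes the line.
enc_in = Input(shape=(None,), name="certificate_line")
enc_emb = Embedding(VOCAB_IN, EMB_DIM, mask_zero=True)(enc_in)   # stands in for the pretrained multi-lingual fastText lookup
_, state_h, state_c = LSTM(STATE_DIM, return_state=True)(enc_emb)

# Decoder: initialized with the encoder state, predicts the dictionary death-cause
# text word by word between the start and end tags.
dec_in = Input(shape=(None,), name="death_cause_shifted")
dec_emb = Embedding(VOCAB_OUT, EMB_DIM, mask_zero=True)(dec_in)
dec_seq, _, _ = LSTM(STATE_DIM, return_sequences=True, return_state=True)(
    dec_emb, initial_state=[state_h, state_c])
next_word = Dense(VOCAB_OUT, activation="softmax")(dec_seq)

model = Model([enc_in, dec_in], next_word)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")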
\citation{raffel_feed-forward_2016}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}ICD-10 Classification Model}{6}{subsection.1.3.2}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Illustration of the ICD-10 classification model. The model utilizes a bi-directional LSTM layer, which processes the death cause text from left to right and vice versa. The attention layer summarizes the whole description by computing an adaptive weighted average over the LSTM states. The resulting death cause embedding is fed through a softmax layer to obtain the final classification. As in our encoder-decoder model, all input tokens are represented by the concatenation of the fastText embeddings of all three languages.}}{6}{figure.1.2}}
\newlabel{fig:classification-model}{{2}{6}{Illustration of the ICD-10 classification model. The model utilizes a bi-directional LSTM layer, which processes the death cause text from left to right and vice versa. The attention layer summarizes the whole description by computing an adaptive weighted average over the LSTM states. The resulting death cause embedding is fed through a softmax layer to obtain the final classification. As in our encoder-decoder model, all input tokens are represented by the concatenation of the fastText embeddings of all three languages}{figure.1.2}{}}
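A corresponding sketch of the classification model from the caption (bi-directional LSTM, a simple feed-forward attention computing the adaptive weighted average, softmax output), again assuming Keras with illustrative sizes; it is not the actual implementation.

# Minimal bi-LSTM + attention + softmax sketch (assumed Keras implementation, illustrative sizes only).
import tensorflow as tf
from tensorflow.keras.layers import Input, Embedding, LSTM, Bidirectional, Dense, Softmax, Lambda
from tensorflow.keras.models import Model

VOCAB, EMB_DIM, STATE_DIM, NUM_CODES = 20000, 3 * 300, 256, 1000  # assumed sizes

tokens = Input(shape=(None,), name="death_cause_text")
emb = Embedding(VOCAB, EMB_DIM)(tokens)          # stands in for the concatenated fastText lookup

# Bi-directional LSTM reads the description left-to-right and right-to-left.
states = Bidirectional(LSTM(STATE_DIM, return_sequences=True))(emb)     # (batch, time, 2*STATE_DIM)

# Simple feed-forward attention: score each state, softmax over time,
# take the weighted average as the death cause embedding.
scores = Dense(1)(states)
weights = Softmax(axis=1)(scores)
summary = Lambda(lambda x: tf.reduce_sum(x[0] * x[1], axis=1))([weights, states])

probs = Dense(NUM_CODES, activation="softmax")(summary)
model = Model(tokens, probs)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")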
\citation{kingma_adam:_2014}
\@writefile{toc}{\contentsline {section}{\numberline {4}Experiments and Results}{7}{section.1.4}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Training Data and Experiment Setup}{7}{subsection.1.4.1}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Death cause extraction model}{7}{subsection.1.4.2}}
\newlabel{tab:s2s}{{4.2}{8}{Death cause extraction model}{subsection.1.4.2}{}}
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Experiment results of our death cause extraction sequence-to-sequence model for the balanced (equal number of training instances per language) and the full data set settings.}}{8}{table.1.1}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}ICD-10 Classification Model}{8}{subsection.1.4.3}}
\newlabel{tab:icd10Classification}{{4.3}{9}{ICD-10 Classification Model}{subsection.1.4.3}{}}
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Experiment results for our ICD-10 classification model regarding different data settings. The \textit {Minimal} setting uses only ICD-10 codes with two or more training instances in the supplied dictionary. In contrast, \textit {Extended} additionally uses the diagnosis texts from the certificate data and duplicates training instances of ICD-10 codes that occur with only one diagnosis text across the dictionary and certificate lines. \textbf {*} Used in the final pipeline.}}{9}{table.1.2}}
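As an illustration of the two data settings described in the caption, a hedged sketch of the filtering and duplication rules; the function and variable names (build_training_set, dictionary_entries, certificate_lines) are hypothetical and not taken from the submission.

# Hypothetical sketch of the Minimal vs. Extended training-set construction.
from collections import defaultdict

def build_training_set(dictionary_entries, certificate_lines=(), extended=False):
    """Both arguments are iterables of (icd10_code, diagnosis_text) pairs."""
    by_code = defaultdict(list)
    for code, text in dictionary_entries:
        by_code[code].append(text)
    if extended:
        for code, text in certificate_lines:
            by_code[code].append(text)

    instances = []
    for code, texts in by_code.items():
        if not extended and len(texts) < 2:
            continue            # Minimal: keep only codes with two or more dictionary instances
        if extended and len(texts) == 1:
            texts = texts * 2   # Extended: duplicate codes that occur with a single text
        instances.extend((code, text) for text in texts)
    return instances

# Hypothetical usage:
# minimal  = build_training_set(dictionary_entries)
# extended = build_training_set(dictionary_entries, certificate_lines, extended=True)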
\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Complete Pipeline}{9}{subsection.1.4.4}}
\newlabel{tab:final_train}{{4.4}{9}{Complete Pipeline}{subsection.1.4.4}{}}
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Evaluation results of the final pipeline on the validation set of the training data. Reported figures represent the prevalence-weighted macro-average across the output classes. Final-Balanced = DCEM-Balanced + ICD-10\_Extended. Final-Full = DCEM-Full + ICD-10\_Extended}}{10}{table.1.3}}
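For reference, the prevalence-weighted macro-average mentioned in the caption corresponds to weighting each per-class score by the class's share of gold-standard instances (standard definition, shown here for precision; recall and F1 are averaged analogously):

\[ P_{\mathrm{weighted}} = \sum_{c \in C} \frac{n_c}{\sum_{c' \in C} n_{c'}} \, P_c \]

where $n_c$ denotes the number of gold-standard instances of ICD-10 code $c$ and $P_c$ the precision for that code.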
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Test results of the final pipeline. Final-Balanced = DCEM-Balanced + ICD-10\_Extended. Final-Full = DCEM-Full + ICD-10\_Extended}}{11}{table.1.4}}
\newlabel{tab:final_test}{{4}{11}{Test results of the final pipeline. Final-Balanced = DCEM-Balanced + ICD-10\_Extended. Final-Full = DCEM-Full + ICD-10\_Extended}{table.1.4}{}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion and Future Work}{11}{section.1.5}}
\bibdata{references}
\bibcite{bahdanau_neural_2018}{1}
\bibcite{bengio_scheduled_2015}{2}
\bibcite{bengio_learning_1994}{3}
\bibcite{bojanowski_enriching_2017}{4}
\bibcite{cabot_sibm_2016}{5}
\bibcite{cho_learning_2014}{6}
\bibcite{collobert_natural_2011}{7}
\bibcite{dermouche_ecstra-inserm_2016}{8}
\bibcite{di_nunzio_lexicon_2017}{9}
\bibcite{dyer_transition-based_2015}{10}
\bibcite{ebersbach_fusion_2017}{11}
\bibcite{faruqui_improving_2014}{12}
\bibcite{guo_cross-lingual_2015}{13}
\bibcite{ho-dac_litl_2017}{14}
\bibcite{ho-dac_litl_2016}{15}
\bibcite{hochreiter_gradient_2001}{16}
\bibcite{hochreiter_long_1997}{17}
\bibcite{jonnagaddala_automatic_2017}{18}
\bibcite{kingma_adam:_2014}{19}
\bibcite{lample_neural_2016}{20}
\bibcite{miftakhutdinov_kfu_2017}{21}
\bibcite{mikolov_efficient_2013}{22}
\bibcite{mikolov_distributed_2013}{23}
\bibcite{van_mulligen_erasmus_2016}{24}
\bibcite{neveol_clef_2017}{25}
\bibcite{neveol_clinical_2016}{26}
\bibcite{neveol_clef_2018}{27}
\bibcite{pennington_glove_2014}{28}
\bibcite{peters_semi-supervised_2017}{29}
\bibcite{peters_deep_2018}{30}
\bibcite{pham_learning_2015}{31}
\bibcite{pinter_mimicking_2017}{32}
\bibcite{raffel_feed-forward_2016}{33}
\bibcite{suominen_overview_2018}{34}
\bibcite{sutskever_sequence_2014}{35}
\bibcite{sogaard_inverted_2015}{36}
\bibcite{turney_frequency_2010}{37}
\bibcite{vyas_sparse_2016}{38}
\bibcite{wang_part--speech_2015}{39}
\bibcite{wei_disease_2016}{40}
\bibcite{xing_normalized_2015}{41}
\bibstyle{splncs04}
\begin{thebibliography}{10}
\providecommand{\url}[1]{\texttt{#1}}
\providecommand{\urlprefix}{URL }
\providecommand{\doi}[1]{https://doi.org/#1}
\bibitem{bahdanau_neural_2018}
Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly
learning to align and translate. In: Proceedings of the 6th {International}
{Conference} on {Learning} {Representations} ({ICLR} 2018) (2018)
\bibitem{bengio_scheduled_2015}
Bengio, S., Vinyals, O., Jaitly, N., Shazeer, N.: Scheduled {Sampling} for
{Sequence} {Prediction} with {Recurrent} {Neural} {Networks}. In: Advances in
{Neural} {Information} {Processing} {Systems} 28, pp. 1171--1179. Curran
Associates, Inc. (2015)
\bibitem{bengio_learning_1994}
Bengio, Y., Simard, P., Frasconi, P.: Learning long-term dependencies with
gradient descent is difficult. IEEE transactions on neural networks
\textbf{5}(2), 157--166 (1994)
\bibitem{bojanowski_enriching_2017}
Bojanowski, P., Grave, E., Joulin, A., Mikolov, T.: Enriching {Word} {Vectors}
with {Subword} {Information}. Transactions of the Association for
Computational Linguistics \textbf{5}(1), 135--146 (2017)
\bibitem{cabot_sibm_2016}
Cabot, C., Soualmia, L.F., Dahamna, B., Darmoni, S.J.: {SIBM} at {CLEF}
{eHealth} {Evaluation} {Lab} 2016: {Extracting} {Concepts} in {French}
{Medical} {Texts} with {ECMT} and {CIMIND}. In: {CLEF} 2016 {Online}
{Working} {Notes}. CEUR-WS (2016)
\bibitem{cho_learning_2014}
Cho, K., van Merrienboer, B., Gulcehre, C., Bahdanau, D., Bougares, F.,
Schwenk, H., Bengio, Y.: Learning {Phrase} {Representations} using {RNN}
{Encoder}–{Decoder} for {Statistical} {Machine} {Translation}. In:
Proceedings of the 2014 {Conference} on {Empirical} {Methods} in {Natural}
{Language} {Processing} ({EMNLP}). pp. 1724--1734. Association for
Computational Linguistics, Doha, Qatar (October 2014)
\bibitem{collobert_natural_2011}
Collobert, R., Weston, J., Bottou, L., Karlen, M., Kavukcuoglu, K., Kuksa, P.:
Natural language processing (almost) from scratch. Journal of Machine
Learning Research \textbf{12}(Aug), 2493--2537 (2011)
\bibitem{dermouche_ecstra-inserm_2016}
Dermouche, M., Looten, V., Flicoteaux, R., Chevret, S., Velcin, J., Taright,
N.: {ECSTRA}-{INSERM}@ {CLEF} {eHealth}2016-task 2: {ICD}10 {Code}
{Extraction} from {Death} {Certificates}. In: {CLEF} 2016 {Online} {Working}
{Notes} (2016)
\bibitem{di_nunzio_lexicon_2017}
Di~Nunzio, G.M., Beghini, F., Vezzani, F., Henrot, G.: A {Lexicon} {Based}
{Approach} to {Classification} of {ICD}10 {Codes}. {IMS} {Unipd} at {CLEF}
{eHealth} {Task}. In: {CLEF} 2017 {Online} {Working} {Notes}. CEUR-WS (2017)
\bibitem{dyer_transition-based_2015}
Dyer, C., Ballesteros, M., Ling, W., Matthews, A., Smith, N.A.:
Transition-{Based} {Dependency} {Parsing} with {Stack} {Long} {Short}-{Term}
{Memory}. In: Proceedings of the 53rd {Annual} {Meeting} of the {Association}
for {Computational} {Linguistics} and the 7th {International} {Joint}
{Conference} on {Natural} {Language} {Processing} ({Volume} 1: {Long}
{Papers}). vol.~1, pp. 334--343 (2015)
\bibitem{ebersbach_fusion_2017}
Ebersbach, M., Herms, R., Eibl, M.: Fusion {Methods} for {ICD}10 {Code}
{Classification} of {Death} {Certificates} in {Multilingual} {Corpora}. In:
{CLEF} 2017 {Online} {Working} {Notes}. CEUR-WS (2017)
\bibitem{faruqui_improving_2014}
Faruqui, M., Dyer, C.: Improving vector space word representations using
multilingual correlation. In: Proceedings of the 14th {Conference} of the
{European} {Chapter} of the {Association} for {Computational} {Linguistics}.
pp. 462--471 (2014)
\bibitem{guo_cross-lingual_2015}
Guo, J., Che, W., Yarowsky, D., Wang, H., Liu, T.: Cross-lingual dependency
parsing based on distributed representations. In: Proceedings of the 53rd
{Annual} {Meeting} of the {Association} for {Computational} {Linguistics} and
the 7th {International} {Joint} {Conference} on {Natural} {Language}
{Processing} ({Volume} 1: {Long} {Papers}). vol.~1, pp. 1234--1244 (2015)
\bibitem{ho-dac_litl_2017}
Ho-Dac, L.M., Fabre, C., Birski, A., Boudraa, I., Bourriot, A., Cassier, M.,
Delvenne, L., Garcia-Gonzalez, C., Kang, E.B., Piccinini, E.: {LITL} at
{CLEF} {eHealth}2017: automatic classification of death reports. In: {CLEF}
2017 {Online} {Working} {Notes}. CEUR-WS (2017)
\bibitem{ho-dac_litl_2016}
Ho-Dac, L.M., Tanguy, L., Grauby, C., Mby, A.H., Malosse, J., Rivière, L.,
Veltz-Mauclair, A.: {LITL} at {CLEF} {eHealth}2016: recognizing entities in
{French} biomedical documents. In: {CLEF} 2016 {Online} {Working} {Notes}.
CEUR-WS (2016)
\bibitem{hochreiter_gradient_2001}
Hochreiter, S., Bengio, Y., Frasconi, P., Schmidhuber, J.: Gradient flow in
recurrent nets: the difficulty of learning long-term dependencies. A field
guide to dynamical recurrent neural networks. IEEE Press (2001)
\bibitem{hochreiter_long_1997}
Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural computation
\textbf{9}(8), 1735--1780 (1997)
\bibitem{jonnagaddala_automatic_2017}
Jonnagaddala, J., Hu, F.: Automatic coding of death certificates to {ICD}-10
terminology. In: {CLEF} 2017 {Online} {Working} {Notes}. CEUR-WS (2017)
\bibitem{kingma_adam:_2014}
Kingma, D.P., Ba, J.: Adam: {A} method for stochastic optimization. In:
Proceedings of the 3rd {International} {Conference} on {Learning}
{Representations} ({ICLR}) (2014)
\bibitem{lample_neural_2016}
Lample, G., Ballesteros, M., Subramanian, S., Kawakami, K., Dyer, C.: Neural
{Architectures} for {Named} {Entity} {Recognition}. In: Proceedings of the
15th {Annual} {Conference} of the {North} {American} {Chapter} of the
{Association} for {Computational} {Linguistics}: {Human} {Language}
{Technologies}. pp. 260--270 (2016)
\bibitem{miftakhutdinov_kfu_2017}
Miftahutdinov, Z., Tutubalina, E.: {KFU} at {CLEF} {eHealth} 2017 {Task} 1: {ICD}-10
coding of {English} death certificates with recurrent neural networks. In:
{CLEF} 2017 {Online} {Working} {Notes}. CEUR-WS (2017)
\bibitem{mikolov_efficient_2013}
Mikolov, T., Chen, K., Corrado, G., Dean, J.: Efficient estimation of word
representations in vector space. arXiv preprint arXiv:1301.3781 (2013)
\bibitem{mikolov_distributed_2013}
Mikolov, T., Sutskever, I., Chen, K., Corrado, G.S., Dean, J.: Distributed
representations of words and phrases and their compositionality. In: Advances
in neural information processing systems. pp. 3111--3119 (2013)
\bibitem{van_mulligen_erasmus_2016}
van Mulligen, E.M., Afzal, Z., Akhondi, S.A., Vo, D., Kors, J.A.: Erasmus {MC}
at {CLEF} {eHealth} 2016: {Concept} {Recognition} and {Coding} in {French}
{Texts}. In: {CLEF} 2016 {Online} {Working} {Notes}. CEUR-WS (2016)
\bibitem{neveol_clef_2017}
Névéol, A., Anderson, R.N., Cohen, K.B., Grouin, C., Lavergne, T., Rey, G.,
Robert, A., Rondet, C., Zweigenbaum, P.: {CLEF} {eHealth} 2017 {Multilingual}
{Information} {Extraction} task overview: {ICD}10 coding of death
certificates in {English} and {French}. In: {CLEF} 2017 {Evaluation} {Labs}
and {Workshop}: {Online} {Working} {Notes}, {CEUR}-{WS}. p.~17 (2017)
\bibitem{neveol_clinical_2016}
Névéol, A., Cohen, K.B., Grouin, C., Hamon, T., Lavergne, T., Kelly, L.,
Goeuriot, L., Rey, G., Robert, A., Tannier, X., Zweigenbaum, P.: Clinical
{Information} {Extraction} at the {CLEF} {eHealth} {Evaluation} lab 2016.
CEUR workshop proceedings \textbf{1609}, 28--42 (September 2016)
\bibitem{neveol_clef_2018}
Névéol, A., Robert, A., Grippo, F., Morgand, C., Orsi, C., Pelikán, L.,
Ramadier, L., Rey, G., Zweigenbaum, P.: {CLEF} {eHealth} 2018 {Multilingual}
{Information} {Extraction} task {Overview}: {ICD}10 {Coding} of {Death}
{Certificates} in {French}, {Hungarian} and {Italian}. In: {CLEF} 2018
{Evaluation} {Labs} and {Workshop}: {Online} {Working} {Notes}. CEUR-WS
(September 2018)
\bibitem{pennington_glove_2014}
Pennington, J., Socher, R., Manning, C.: {GloVe}: {Global} vectors for word
representation. In: Proceedings of the 2014 conference on empirical methods
in natural language processing ({EMNLP}). pp. 1532--1543 (2014)
\bibitem{peters_semi-supervised_2017}
Peters, M., Ammar, W., Bhagavatula, C., Power, R.: Semi-supervised sequence
tagging with bidirectional language models. In: Proceedings of the 55th
{Annual} {Meeting} of the {Association} for {Computational} {Linguistics}
({Volume} 1: {Long} {Papers}). vol.~1, pp. 1756--1765 (2017)
\bibitem{peters_deep_2018}
Peters, M.E., Neumann, M., Iyyer, M., Gardner, M., Clark, C., Lee, K.,
Zettlemoyer, L.: Deep contextualized word representations. In: The 16th
{Annual} {Conference} of the {North} {American} {Chapter} of the
{Association} for {Computational} {Linguistics} (2018)
\bibitem{pham_learning_2015}
Pham, H., Luong, T., Manning, C.: Learning distributed representations for
multilingual text sequences. In: Proceedings of the 1st {Workshop} on
{Vector} {Space} {Modeling} for {Natural} {Language} {Processing}. pp. 88--94
(2015)
\bibitem{pinter_mimicking_2017}
Pinter, Y., Guthrie, R., Eisenstein, J.: Mimicking {Word} {Embeddings} using
{Subword} {RNNs}. In: Proceedings of the 2017 {Conference} on {Empirical}
{Methods} in {Natural} {Language} {Processing}. pp. 102--112 (2017)
\bibitem{raffel_feed-forward_2016}
Raffel, C., Ellis, D.P.: Feed-forward networks with attention can solve some
long-term memory problems. In: Workshop {Extended} {Abstracts} of the 4th
{International} {Conference} on {Learning} {Representations} (2016)
\bibitem{suominen_overview_2018}
Suominen, H., Kelly, L., Goeuriot, L., Kanoulas, E., Azzopardi, L., Spijker,
R., Li, D., Névéol, A., Ramadier, L., Robert, A., Zuccon, G., Palotti, J.:
Overview of the {CLEF} {eHealth} {Evaluation} {Lab} 2018. In: {CLEF} 2018 -
8th {Conference} and {Labs} of the {Evaluation} {Forum}. Lecture {Notes} in
{Computer} {Science} ({LNCS}), Springer (2018)
\bibitem{sutskever_sequence_2014}
Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural
networks. In: Advances in neural information processing systems. pp.
3104--3112 (2014)
\bibitem{sogaard_inverted_2015}
Søgaard, A., Agić, Z., Alonso, H.M., Plank, B., Bohnet, B., Johannsen, A.:
Inverted indexing for cross-lingual {NLP}. In: The 53rd {Annual} {Meeting} of
the {Association} for {Computational} {Linguistics} and the 7th
{International} {Joint} {Conference} of the {Asian} {Federation} of {Natural}
{Language} {Processing} ({ACL}-{IJCNLP} 2015) (2015)
\bibitem{turney_frequency_2010}
Turney, P.D., Pantel, P.: From frequency to meaning: {Vector} space models of
semantics. Journal of artificial intelligence research \textbf{37},
141--188 (2010)
\bibitem{vyas_sparse_2016}
Vyas, Y., Carpuat, M.: Sparse bilingual word representations for cross-lingual
lexical entailment. In: Proceedings of the 2016 {Conference} of the {North}
{American} {Chapter} of the {Association} for {Computational} {Linguistics}:
{Human} {Language} {Technologies}. pp. 1187--1197 (2016)
\bibitem{wang_part--speech_2015}
Wang, P., Qian, Y., Soong, F.K., He, L., Zhao, H.: Part-of-speech tagging with
bidirectional long short-term memory recurrent neural network. arXiv preprint
arXiv:1510.06168 (2015)
\bibitem{wei_disease_2016}
Wei, Q., Chen, T., Xu, R., He, Y., Gui, L.: Disease named entity recognition by
combining conditional random fields and bidirectional recurrent neural
networks. Database: The Journal of Biological Databases and Curation
\textbf{2016} (2016)
\bibitem{xing_normalized_2015}
Xing, C., Wang, D., Liu, C., Lin, Y.: Normalized word embedding and orthogonal
transform for bilingual word translation. In: Proceedings of the 2015
{Conference} of the {North} {American} {Chapter} of the {Association} for
{Computational} {Linguistics}: {Human} {Language} {Technologies}. pp.
1006--1011 (2015)
\end{thebibliography}
This is BibTeX, Version 0.99d (TeX Live 2014)
Capacity: max_strings=35307, hash_size=35307, hash_prime=30011
The top-level auxiliary file: wbi-eclef18.aux
The style file: splncs04.bst
Database file #1: references.bib
You've used 41 entries,
2850 wiz_defined-function locations,
796 strings with 15366 characters,
and the built_in function-call counts, 26474 in all, are:
= -- 2136
> -- 1052
< -- 31
+ -- 430
- -- 388
* -- 1691
:= -- 3417
add.period$ -- 67
call.type$ -- 41
change.case$ -- 378
chr.to.int$ -- 0
cite$ -- 41
duplicate$ -- 2289
empty$ -- 2300
format.name$ -- 429
if$ -- 5740
int.to.chr$ -- 0
int.to.str$ -- 41
missing$ -- 486
newline$ -- 129
num.names$ -- 82
pop$ -- 1034
preamble$ -- 1
purify$ -- 276
quote$ -- 0
skip$ -- 654
stack$ -- 0
substring$ -- 1306
swap$ -- 1203
text.length$ -- 31
text.prefix$ -- 0
top$ -- 0
type$ -- 162
warning$ -- 0
while$ -- 184
width$ -- 43
write$ -- 412
\BOOKMARK [0][-]{chapter.1}{WBI at CLEF eHealth 2018 Task 1: Language-independent ICD-10 coding using multi-lingual embeddings and recurrent neural networks}{}% 1