@inproceedings{yerebakan-etal-2018-document,
title = "Document Representation Learning for Patient History Visualization",
author = "Yerebakan, Halid Ziya and
Shinagawa, Yoshihisa and
Bhatia, Parmeet and
Zhan, Yiqiang",
editor = "Zhao, Dongyan",
booktitle = "Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations",
month = aug,
year = "2018",
address = "Santa Fe, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/C18-2007",
pages = "30--33",
abstract = "We tackle the problem of generating a diagrammatic summary of a set of documents each of which pertains to loosely related topics. In particular, we aim at visualizing the medical histories of patients. In medicine, choosing relevant reports from a patient{'}s past exams for comparison provide valuable information for precise treatment planning. Manually finding the relevant reports for comparison studies from a large database is time-consuming, which could result overlooking of some critical information. This task can be automated by defining similarity among documents which is a nontrivial task since these documents are often stored in an unstructured text format. To facilitate this, we have used a representation learning algorithm that creates a semantic representation space for documents where the clinically related documents lie close to each other. We have utilized referral information to weakly supervise a LSTM network to learn this semantic space. The abstract representations within this semantic space are not only useful to visualize disease progressions corresponding to the relevant report groups of a patient, but are also beneficial to analyze diseases at the population level. The proposed key tool here is clustering of documents based on the document similarity whose metric is learned from corpora.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yerebakan-etal-2018-document">
<titleInfo>
<title>Document Representation Learning for Patient History Visualization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Halid</namePart>
<namePart type="given">Ziya</namePart>
<namePart type="family">Yerebakan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoshihisa</namePart>
<namePart type="family">Shinagawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Parmeet</namePart>
<namePart type="family">Bhatia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiqiang</namePart>
<namePart type="family">Zhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dongyan</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Santa Fe, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We tackle the problem of generating a diagrammatic summary of a set of documents, each of which pertains to loosely related topics. In particular, we aim at visualizing the medical histories of patients. In medicine, choosing relevant reports from a patient’s past exams for comparison provides valuable information for precise treatment planning. Manually finding the relevant reports for comparison studies in a large database is time-consuming and could result in overlooking critical information. This task can be automated by defining similarity among documents, which is a nontrivial task since these documents are often stored in an unstructured text format. To facilitate this, we have used a representation learning algorithm that creates a semantic representation space for documents where clinically related documents lie close to each other. We have utilized referral information to weakly supervise an LSTM network to learn this semantic space. The abstract representations within this semantic space are not only useful for visualizing disease progressions corresponding to the relevant report groups of a patient, but are also beneficial for analyzing diseases at the population level. The key tool proposed here is clustering of documents based on a document similarity metric learned from corpora.</abstract>
<identifier type="citekey">yerebakan-etal-2018-document</identifier>
<location>
<url>https://aclanthology.org/C18-2007</url>
</location>
<part>
<date>2018-08</date>
<extent unit="page">
<start>30</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Document Representation Learning for Patient History Visualization
%A Yerebakan, Halid Ziya
%A Shinagawa, Yoshihisa
%A Bhatia, Parmeet
%A Zhan, Yiqiang
%Y Zhao, Dongyan
%S Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations
%D 2018
%8 August
%I Association for Computational Linguistics
%C Santa Fe, New Mexico
%F yerebakan-etal-2018-document
%X We tackle the problem of generating a diagrammatic summary of a set of documents, each of which pertains to loosely related topics. In particular, we aim at visualizing the medical histories of patients. In medicine, choosing relevant reports from a patient’s past exams for comparison provides valuable information for precise treatment planning. Manually finding the relevant reports for comparison studies in a large database is time-consuming and could result in overlooking critical information. This task can be automated by defining similarity among documents, which is a nontrivial task since these documents are often stored in an unstructured text format. To facilitate this, we have used a representation learning algorithm that creates a semantic representation space for documents where clinically related documents lie close to each other. We have utilized referral information to weakly supervise an LSTM network to learn this semantic space. The abstract representations within this semantic space are not only useful for visualizing disease progressions corresponding to the relevant report groups of a patient, but are also beneficial for analyzing diseases at the population level. The key tool proposed here is clustering of documents based on a document similarity metric learned from corpora.
%U https://aclanthology.org/C18-2007
%P 30-33
Markdown (Informal)
[Document Representation Learning for Patient History Visualization](https://aclanthology.org/C18-2007) (Yerebakan et al., COLING 2018)
ACL
Halid Ziya Yerebakan, Yoshihisa Shinagawa, Parmeet Bhatia, and Yiqiang Zhan. 2018. Document Representation Learning for Patient History Visualization. In Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pages 30–33, Santa Fe, New Mexico. Association for Computational Linguistics.
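
For readers who want a concrete picture of the setup the abstract describes (an LSTM document encoder weakly supervised by referral links, with similarity-based grouping downstream), below is a minimal, hypothetical PyTorch sketch. The class and function names, dimensions, and the triplet-style objective are illustrative assumptions only; the paper's exact architecture and training objective may differ.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class DocEncoder(nn.Module):
    """Encode a tokenized report into a fixed-size embedding with an LSTM.

    Hypothetical sketch: vocabulary size, dimensions, and the pooling choice
    are illustrative assumptions, not details from the paper.
    """

    def __init__(self, vocab_size=30000, embed_dim=128, hidden_dim=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)

    def forward(self, token_ids):
        # token_ids: (batch, seq_len) integer tensor of word indices
        x = self.embed(token_ids)
        _, (h_n, _) = self.lstm(x)
        # Use the final hidden state as the document representation.
        return h_n[-1]


def triplet_loss(anchor, positive, negative, margin=0.2):
    """Weak-supervision objective (assumed): pull reports linked by a referral
    (positive pair) together in the embedding space, push unrelated reports apart."""
    pos_sim = F.cosine_similarity(anchor, positive)
    neg_sim = F.cosine_similarity(anchor, negative)
    return F.relu(margin - pos_sim + neg_sim).mean()


if __name__ == "__main__":
    encoder = DocEncoder()
    # Toy batch of already-tokenized reports: anchor, referral-linked, unrelated.
    anchor = encoder(torch.randint(1, 30000, (4, 50)))
    positive = encoder(torch.randint(1, 30000, (4, 50)))
    negative = encoder(torch.randint(1, 30000, (4, 50)))
    loss = triplet_loss(anchor, positive, negative)
    loss.backward()
    print(f"toy triplet loss: {loss.item():.4f}")
```

Once trained, the resulting document embeddings can be grouped with any off-the-shelf clustering method (e.g. k-means over cosine-normalized vectors) to recover the related report groups used for the patient-history visualization described in the abstract.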