@inproceedings{makhervaks-etal-2023-clinical,
  author    = {Makhervaks, Dave and Gillis, Plia and Radinsky, Kira},
  editor    = {Bouamor, Houda and Pino, Juan and Bali, Kalika},
  title     = {Clinical Contradiction Detection},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-main.80},
  doi       = {10.18653/v1/2023.emnlp-main.80},
  pages     = {1248--1263},
  abstract  = {Detecting contradictions in text is essential in determining the validity of the literature and sources that we consume. Medical corpora are riddled with conflicting statements. This is due to the large throughput of new studies and the difficulty in replicating experiments, such as clinical trials. Detecting contradictions in this domain is hard since it requires clinical expertise. We present a distant supervision approach that leverages a medical ontology to build a seed of potential clinical contradictions over 22 million medical abstracts. We automatically build a labeled training dataset consisting of paired clinical sentences that are grounded in an ontology and represent potential medical contradiction. The dataset is used to weakly-supervise state-of-the-art deep learning models showing significant empirical improvements across multiple medical contradiction datasets.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="makhervaks-etal-2023-clinical">
<titleInfo>
<title>Clinical Contradiction Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dave</namePart>
<namePart type="family">Makhervaks</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Plia</namePart>
<namePart type="family">Gillis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kira</namePart>
<namePart type="family">Radinsky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Detecting contradictions in text is essential in determining the validity of the literature and sources that we consume. Medical corpora are riddled with conflicting statements. This is due to the large throughput of new studies and the difficulty in replicating experiments, such as clinical trials. Detecting contradictions in this domain is hard since it requires clinical expertise. We present a distant supervision approach that leverages a medical ontology to build a seed of potential clinical contradictions over 22 million medical abstracts. We automatically build a labeled training dataset consisting of paired clinical sentences that are grounded in an ontology and represent potential medical contradiction. The dataset is used to weakly-supervise state-of-the-art deep learning models showing significant empirical improvements across multiple medical contradiction datasets.</abstract>
<identifier type="citekey">makhervaks-etal-2023-clinical</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-main.80</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-main.80</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>1248</start>
<end>1263</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Clinical Contradiction Detection
%A Makhervaks, Dave
%A Gillis, Plia
%A Radinsky, Kira
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F makhervaks-etal-2023-clinical
%X Detecting contradictions in text is essential in determining the validity of the literature and sources that we consume. Medical corpora are riddled with conflicting statements. This is due to the large throughput of new studies and the difficulty in replicating experiments, such as clinical trials. Detecting contradictions in this domain is hard since it requires clinical expertise. We present a distant supervision approach that leverages a medical ontology to build a seed of potential clinical contradictions over 22 million medical abstracts. We automatically build a labeled training dataset consisting of paired clinical sentences that are grounded in an ontology and represent potential medical contradiction. The dataset is used to weakly-supervise state-of-the-art deep learning models showing significant empirical improvements across multiple medical contradiction datasets.
%R 10.18653/v1/2023.emnlp-main.80
%U https://aclanthology.org/2023.emnlp-main.80
%U https://doi.org/10.18653/v1/2023.emnlp-main.80
%P 1248-1263
Markdown (Informal)
[Clinical Contradiction Detection](https://aclanthology.org/2023.emnlp-main.80) (Makhervaks et al., EMNLP 2023)
ACL
- Dave Makhervaks, Plia Gillis, and Kira Radinsky. 2023. Clinical Contradiction Detection. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 1248–1263, Singapore. Association for Computational Linguistics.