@inproceedings{opitz-etal-2021-explainable,
title = "Explainable Unsupervised Argument Similarity Rating with {A}bstract {M}eaning {R}epresentation and Conclusion Generation",
author = "Opitz, Juri and
Heinisch, Philipp and
Wiesenbach, Philipp and
Cimiano, Philipp and
Frank, Anette",
editor = "Al-Khatib, Khalid and
Hou, Yufang and
Stede, Manfred",
booktitle = "Proceedings of the 8th Workshop on Argument Mining",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.argmining-1.3",
doi = "10.18653/v1/2021.argmining-1.3",
pages = "24--35",
abstract = "When assessing the similarity of arguments, researchers typically use approaches that do not provide interpretable evidence or justifications for their ratings. Hence, the features that determine argument similarity remain elusive. We address this issue by introducing \textit{novel argument similarity metrics} that aim at high performance and explainability. We show that Abstract Meaning Representation (AMR) graphs can be useful for representing arguments, and that novel AMR graph metrics can offer explanations for argument similarity ratings. We start from the hypothesis that \textit{similar premises} often lead to \textit{similar conclusions}{---}and extend an approach for \textit{AMR-based argument similarity rating} by estimating, in addition, the similarity of \textit{conclusions} that we automatically infer from the arguments used as premises. We show that AMR similarity metrics make argument similarity judgements more \textit{interpretable} and may even support \textit{argument quality judgements}. Our approach provides significant performance improvements over strong baselines in a \textit{fully unsupervised} setting. Finally, we make first steps to address the problem of reference-less evaluation of argumentative conclusion generations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="opitz-etal-2021-explainable">
    <titleInfo>
      <title>Explainable Unsupervised Argument Similarity Rating with Abstract Meaning Representation and Conclusion Generation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Juri</namePart>
      <namePart type="family">Opitz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Philipp</namePart>
      <namePart type="family">Heinisch</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Philipp</namePart>
      <namePart type="family">Wiesenbach</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Philipp</namePart>
      <namePart type="family">Cimiano</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anette</namePart>
      <namePart type="family">Frank</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 8th Workshop on Argument Mining</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Khalid</namePart>
        <namePart type="family">Al-Khatib</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yufang</namePart>
        <namePart type="family">Hou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Manfred</namePart>
        <namePart type="family">Stede</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>When assessing the similarity of arguments, researchers typically use approaches that do not provide interpretable evidence or justifications for their ratings. Hence, the features that determine argument similarity remain elusive. We address this issue by introducing novel argument similarity metrics that aim at high performance and explainability. We show that Abstract Meaning Representation (AMR) graphs can be useful for representing arguments, and that novel AMR graph metrics can offer explanations for argument similarity ratings. We start from the hypothesis that similar premises often lead to similar conclusions—and extend an approach for AMR-based argument similarity rating by estimating, in addition, the similarity of conclusions that we automatically infer from the arguments used as premises. We show that AMR similarity metrics make argument similarity judgements more interpretable and may even support argument quality judgements. Our approach provides significant performance improvements over strong baselines in a fully unsupervised setting. Finally, we make first steps to address the problem of reference-less evaluation of argumentative conclusion generations.</abstract>
    <identifier type="citekey">opitz-etal-2021-explainable</identifier>
    <identifier type="doi">10.18653/v1/2021.argmining-1.3</identifier>
    <location>
      <url>https://aclanthology.org/2021.argmining-1.3</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>24</start>
        <end>35</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Explainable Unsupervised Argument Similarity Rating with Abstract Meaning Representation and Conclusion Generation
%A Opitz, Juri
%A Heinisch, Philipp
%A Wiesenbach, Philipp
%A Cimiano, Philipp
%A Frank, Anette
%Y Al-Khatib, Khalid
%Y Hou, Yufang
%Y Stede, Manfred
%S Proceedings of the 8th Workshop on Argument Mining
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F opitz-etal-2021-explainable
%X When assessing the similarity of arguments, researchers typically use approaches that do not provide interpretable evidence or justifications for their ratings. Hence, the features that determine argument similarity remain elusive. We address this issue by introducing novel argument similarity metrics that aim at high performance and explainability. We show that Abstract Meaning Representation (AMR) graphs can be useful for representing arguments, and that novel AMR graph metrics can offer explanations for argument similarity ratings. We start from the hypothesis that similar premises often lead to similar conclusions—and extend an approach for AMR-based argument similarity rating by estimating, in addition, the similarity of conclusions that we automatically infer from the arguments used as premises. We show that AMR similarity metrics make argument similarity judgements more interpretable and may even support argument quality judgements. Our approach provides significant performance improvements over strong baselines in a fully unsupervised setting. Finally, we make first steps to address the problem of reference-less evaluation of argumentative conclusion generations.
%R 10.18653/v1/2021.argmining-1.3
%U https://aclanthology.org/2021.argmining-1.3
%U https://doi.org/10.18653/v1/2021.argmining-1.3
%P 24-35
Markdown (Informal)
[Explainable Unsupervised Argument Similarity Rating with Abstract Meaning Representation and Conclusion Generation](https://aclanthology.org/2021.argmining-1.3) (Opitz et al., ArgMining 2021)
ACL
Juri Opitz, Philipp Heinisch, Philipp Wiesenbach, Philipp Cimiano, and Anette Frank. 2021. [Explainable Unsupervised Argument Similarity Rating with Abstract Meaning Representation and Conclusion Generation](https://aclanthology.org/2021.argmining-1.3). In *Proceedings of the 8th Workshop on Argument Mining*, pages 24–35, Punta Cana, Dominican Republic. Association for Computational Linguistics.