BibTeX
@inproceedings{briakou-etal-2023-explaining,
title = "Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences",
author = "Briakou, Eleftheria and
Goyal, Navita and
Carpuat, Marine",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.emnlp-main.690/",
doi = "10.18653/v1/2023.emnlp-main.690",
pages = "11220--11237",
abstract = "Explainable NLP techniques primarily explain by answering {\textquotedblleft}Which tokens in the input are responsible for this prediction?{\textquotedblright}. We argue that for NLP models that make predictions by comparing two input texts, it is more useful to explain by answering {\textquotedblleft}What differences between the two inputs explain this prediction?{\textquotedblright}. We introduce a technique to generate contrastive phrasal highlights that explain the predictions of a semantic divergence model via phrase alignment guided erasure. We show that the resulting highlights match human rationales of cross-lingual semantic differences better than popular post-hoc saliency techniques and that they successfully help people detect fine-grained meaning differences in human translations and critical machine translation errors."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="briakou-etal-2023-explaining">
<titleInfo>
<title>Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eleftheria</namePart>
<namePart type="family">Briakou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Navita</namePart>
<namePart type="family">Goyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marine</namePart>
<namePart type="family">Carpuat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Explainable NLP techniques primarily explain by answering “Which tokens in the input are responsible for this prediction?”. We argue that for NLP models that make predictions by comparing two input texts, it is more useful to explain by answering “What differences between the two inputs explain this prediction?”. We introduce a technique to generate contrastive phrasal highlights that explain the predictions of a semantic divergence model via phrase alignment guided erasure. We show that the resulting highlights match human rationales of cross-lingual semantic differences better than popular post-hoc saliency techniques and that they successfully help people detect fine-grained meaning differences in human translations and critical machine translation errors.</abstract>
<identifier type="citekey">briakou-etal-2023-explaining</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-main.690</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-main.690/</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>11220</start>
<end>11237</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences
%A Briakou, Eleftheria
%A Goyal, Navita
%A Carpuat, Marine
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F briakou-etal-2023-explaining
%X Explainable NLP techniques primarily explain by answering “Which tokens in the input are responsible for this prediction?”. We argue that for NLP models that make predictions by comparing two input texts, it is more useful to explain by answering “What differences between the two inputs explain this prediction?”. We introduce a technique to generate contrastive phrasal highlights that explain the predictions of a semantic divergence model via phrase alignment guided erasure. We show that the resulting highlights match human rationales of cross-lingual semantic differences better than popular post-hoc saliency techniques and that they successfully help people detect fine-grained meaning differences in human translations and critical machine translation errors.
%R 10.18653/v1/2023.emnlp-main.690
%U https://aclanthology.org/2023.emnlp-main.690/
%U https://doi.org/10.18653/v1/2023.emnlp-main.690
%P 11220-11237
Markdown (Informal)
[Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences](https://aclanthology.org/2023.emnlp-main.690/) (Briakou et al., EMNLP 2023)
ACL
Eleftheria Briakou, Navita Goyal, and Marine Carpuat. 2023. Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 11220–11237, Singapore. Association for Computational Linguistics.