@inproceedings{bigoulaeva-etal-2022-effective,
title = "Effective Cross-Task Transfer Learning for Explainable Natural Language Inference with T5",
author = "Bigoulaeva, Irina and
Singh Sachdeva, Rachneet and
Tayyar Madabushi, Harish and
Villavicencio, Aline and
Gurevych, Iryna",
editor = "Ghosh, Debanjan and
Beigman Klebanov, Beata and
Muresan, Smaranda and
Feldman, Anna and
Poria, Soujanya and
Chakrabarty, Tuhin",
booktitle = "Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.flp-1.8",
doi = "10.18653/v1/2022.flp-1.8",
pages = "54--60",
abstract = "We compare sequential fine-tuning with a model for multi-task learning in the context where we are interested in boosting performance on two of the tasks, one of which depends on the other. We test these models on the FigLang2022 shared task which requires participants to predict language inference labels on figurative language along with corresponding textual explanations of the inference predictions. Our results show that while sequential multi-task learning can be tuned to be good at the first of two target tasks, it performs less well on the second and additionally struggles with overfitting. Our findings show that simple sequential fine-tuning of text-to-text models is an extraordinarily powerful method of achieving cross-task knowledge transfer while simultaneously predicting multiple interdependent targets. So much so, that our best model achieved the (tied) highest score on the task.",
}
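For readers who want to see what the approach described in the abstract looks like in practice, below is a minimal sketch of sequential fine-tuning with T5 using Hugging Face Transformers. This is not the authors' released code: the prompt formats, the toy example, the model size, and all hyperparameters are illustrative assumptions; only the two-stage recipe (label prediction first, then explanation generation continuing from the same weights) follows the paper's description.

```python
# Minimal sketch of sequential fine-tuning with T5 (NOT the authors' code).
# Stage 1 fine-tunes on NLI label prediction; stage 2 continues from the
# same weights on explanation generation. Prompts, data, and hyperparameters
# are illustrative assumptions, not taken from the paper.
import torch
from torch.optim import AdamW
from transformers import T5ForConditionalGeneration, T5TokenizerFast

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = T5TokenizerFast.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small").to(device)

# Toy stand-in for FigLang2022-style data: (premise, hypothesis, label, explanation).
examples = [
    ("Time is a thief.", "Time takes things away from us.", "entailment",
     "The metaphor implies that time steals moments, supporting the hypothesis."),
]

def train_stage(pairs, epochs=1, lr=3e-4):
    """Run one fine-tuning stage over (source_text, target_text) pairs."""
    optimizer = AdamW(model.parameters(), lr=lr)
    model.train()
    for _ in range(epochs):
        for source, target in pairs:
            batch = tokenizer(source, return_tensors="pt").to(device)
            labels = tokenizer(target, return_tensors="pt").input_ids.to(device)
            loss = model(**batch, labels=labels).loss  # T5 shifts labels internally
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

# Stage 1: label prediction. In the text-to-text setting the label is a target string.
train_stage([(f"nli premise: {p} hypothesis: {h}", lab)
             for p, h, lab, _ in examples])

# Stage 2: continue from the SAME weights, now generating label plus explanation.
train_stage([(f"explain premise: {p} hypothesis: {h}",
              f"{lab} explanation: {expl}")
             for p, h, lab, expl in examples])
```

The only structural commitment in this sketch is that stage 2 resumes from the stage-1 weights rather than optimizing both objectives jointly, which is the distinction the abstract draws between sequential fine-tuning and multi-task learning.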
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bigoulaeva-etal-2022-effective">
    <titleInfo>
      <title>Effective Cross-Task Transfer Learning for Explainable Natural Language Inference with T5</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Irina</namePart>
      <namePart type="family">Bigoulaeva</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rachneet</namePart>
      <namePart type="family">Singh Sachdeva</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Harish</namePart>
      <namePart type="family">Tayyar Madabushi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aline</namePart>
      <namePart type="family">Villavicencio</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Iryna</namePart>
      <namePart type="family">Gurevych</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Debanjan</namePart>
        <namePart type="family">Ghosh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Beata</namePart>
        <namePart type="family">Beigman Klebanov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Smaranda</namePart>
        <namePart type="family">Muresan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Feldman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Soujanya</namePart>
        <namePart type="family">Poria</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tuhin</namePart>
        <namePart type="family">Chakrabarty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We compare sequential fine-tuning with a model for multi-task learning in a setting where we are interested in boosting performance on two tasks, one of which depends on the other. We test these models on the FigLang2022 shared task, which requires participants to predict language inference labels on figurative language along with corresponding textual explanations of the inference predictions. Our results show that while sequential multi-task learning can be tuned to perform well on the first of the two target tasks, it performs less well on the second and additionally struggles with overfitting. Our findings show that simple sequential fine-tuning of text-to-text models is an extraordinarily powerful method of achieving cross-task knowledge transfer while simultaneously predicting multiple interdependent targets. So much so that our best model achieved the (tied) highest score on the task.</abstract>
<identifier type="citekey">bigoulaeva-etal-2022-effective</identifier>
<identifier type="doi">10.18653/v1/2022.flp-1.8</identifier>
<location>
<url>https://aclanthology.org/2022.flp-1.8</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>54</start>
<end>60</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Effective Cross-Task Transfer Learning for Explainable Natural Language Inference with T5
%A Bigoulaeva, Irina
%A Singh Sachdeva, Rachneet
%A Tayyar Madabushi, Harish
%A Villavicencio, Aline
%A Gurevych, Iryna
%Y Ghosh, Debanjan
%Y Beigman Klebanov, Beata
%Y Muresan, Smaranda
%Y Feldman, Anna
%Y Poria, Soujanya
%Y Chakrabarty, Tuhin
%S Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F bigoulaeva-etal-2022-effective
%X We compare sequential fine-tuning with a model for multi-task learning in a setting where we are interested in boosting performance on two tasks, one of which depends on the other. We test these models on the FigLang2022 shared task, which requires participants to predict language inference labels on figurative language along with corresponding textual explanations of the inference predictions. Our results show that while sequential multi-task learning can be tuned to perform well on the first of the two target tasks, it performs less well on the second and additionally struggles with overfitting. Our findings show that simple sequential fine-tuning of text-to-text models is an extraordinarily powerful method of achieving cross-task knowledge transfer while simultaneously predicting multiple interdependent targets. So much so that our best model achieved the (tied) highest score on the task.
%R 10.18653/v1/2022.flp-1.8
%U https://aclanthology.org/2022.flp-1.8
%U https://doi.org/10.18653/v1/2022.flp-1.8
%P 54-60
Markdown (Informal)
[Effective Cross-Task Transfer Learning for Explainable Natural Language Inference with T5](https://aclanthology.org/2022.flp-1.8) (Bigoulaeva et al., Fig-Lang 2022)
ACL
Irina Bigoulaeva, Rachneet Singh Sachdeva, Harish Tayyar Madabushi, Aline Villavicencio, and Iryna Gurevych. 2022. Effective Cross-Task Transfer Learning for Explainable Natural Language Inference with T5. In Proceedings of the 3rd Workshop on Figurative Language Processing (FLP), pages 54–60, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.