BibTeX
@inproceedings{putra-etal-2021-multi,
title = "Multi-task and Multi-corpora Training Strategies to Enhance Argumentative Sentence Linking Performance",
author = "Putra, Jan Wira Gotama and
Teufel, Simone and
Tokunaga, Takenobu",
editor = "Al-Khatib, Khalid and
Hou, Yufang and
Stede, Manfred",
booktitle = "Proceedings of the 8th Workshop on Argument Mining",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.argmining-1.2",
doi = "10.18653/v1/2021.argmining-1.2",
pages = "12--23",
abstract = "Argumentative structure prediction aims to establish links between textual units and label the relationship between them, forming a structured representation for a given input text. The former task, linking, has been identified by earlier works as particularly challenging, as it requires finding the most appropriate structure out of a very large search space of possible link combinations. In this paper, we improve a state-of-the-art linking model by using multi-task and multi-corpora training strategies. Our auxiliary tasks help the model to learn the role of each sentence in the argumentative structure. Combining multi-corpora training with a selective sampling strategy increases the training data size while ensuring that the model still learns the desired target distribution well. Experiments on essays written by English-as-a-foreign-language learners show that both strategies significantly improve the model{'}s performance; for instance, we observe a 15.8{\%} increase in the F1-macro for individual link predictions.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="putra-etal-2021-multi">
    <titleInfo>
      <title>Multi-task and Multi-corpora Training Strategies to Enhance Argumentative Sentence Linking Performance</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jan</namePart>
      <namePart type="given">Wira</namePart>
      <namePart type="given">Gotama</namePart>
      <namePart type="family">Putra</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Simone</namePart>
      <namePart type="family">Teufel</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Takenobu</namePart>
      <namePart type="family">Tokunaga</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 8th Workshop on Argument Mining</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Khalid</namePart>
        <namePart type="family">Al-Khatib</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yufang</namePart>
        <namePart type="family">Hou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Manfred</namePart>
        <namePart type="family">Stede</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Argumentative structure prediction aims to establish links between textual units and label the relationship between them, forming a structured representation for a given input text. The former task, linking, has been identified by earlier works as particularly challenging, as it requires finding the most appropriate structure out of a very large search space of possible link combinations. In this paper, we improve a state-of-the-art linking model by using multi-task and multi-corpora training strategies. Our auxiliary tasks help the model to learn the role of each sentence in the argumentative structure. Combining multi-corpora training with a selective sampling strategy increases the training data size while ensuring that the model still learns the desired target distribution well. Experiments on essays written by English-as-a-foreign-language learners show that both strategies significantly improve the model’s performance; for instance, we observe a 15.8% increase in the F1-macro for individual link predictions.</abstract>
    <identifier type="citekey">putra-etal-2021-multi</identifier>
    <identifier type="doi">10.18653/v1/2021.argmining-1.2</identifier>
    <location>
      <url>https://aclanthology.org/2021.argmining-1.2</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>12</start>
        <end>23</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Multi-task and Multi-corpora Training Strategies to Enhance Argumentative Sentence Linking Performance
%A Putra, Jan Wira Gotama
%A Teufel, Simone
%A Tokunaga, Takenobu
%Y Al-Khatib, Khalid
%Y Hou, Yufang
%Y Stede, Manfred
%S Proceedings of the 8th Workshop on Argument Mining
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F putra-etal-2021-multi
%X Argumentative structure prediction aims to establish links between textual units and label the relationship between them, forming a structured representation for a given input text. The former task, linking, has been identified by earlier works as particularly challenging, as it requires finding the most appropriate structure out of a very large search space of possible link combinations. In this paper, we improve a state-of-the-art linking model by using multi-task and multi-corpora training strategies. Our auxiliary tasks help the model to learn the role of each sentence in the argumentative structure. Combining multi-corpora training with a selective sampling strategy increases the training data size while ensuring that the model still learns the desired target distribution well. Experiments on essays written by English-as-a-foreign-language learners show that both strategies significantly improve the model’s performance; for instance, we observe a 15.8% increase in the F1-macro for individual link predictions.
%R 10.18653/v1/2021.argmining-1.2
%U https://aclanthology.org/2021.argmining-1.2
%U https://doi.org/10.18653/v1/2021.argmining-1.2
%P 12-23
Markdown (Informal)
[Multi-task and Multi-corpora Training Strategies to Enhance Argumentative Sentence Linking Performance](https://aclanthology.org/2021.argmining-1.2) (Putra et al., ArgMining 2021)

ACL
Jan Wira Gotama Putra, Simone Teufel, and Takenobu Tokunaga. 2021. [Multi-task and Multi-corpora Training Strategies to Enhance Argumentative Sentence Linking Performance](https://aclanthology.org/2021.argmining-1.2). In *Proceedings of the 8th Workshop on Argument Mining*, pages 12–23, Punta Cana, Dominican Republic. Association for Computational Linguistics.