@inproceedings{kang-etal-2025-reassessing,
title = "Reassessing Graph Linearization for Sequence-to-sequence {AMR} Parsing: On the Advantages and Limitations of Triple-Based",
author = "Kang, Jeongwoo and
Coavoux, Maximin and
Schwab, Didier and
Lopez, C{\'e}dric",
editor = "Drozd, Aleksandr and
Sedoc, Jo{\~a}o and
Tafreshi, Shabnam and
Akula, Arjun and
Shu, Raphael",
booktitle = "The Sixth Workshop on Insights from Negative Results in NLP",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.insights-1.3/",
doi = "10.18653/v1/2025.insights-1.3",
pages = "15--23",
ISBN = "979-8-89176-240-4",
abstract = "Sequence-to-sequence models are widely used to train Abstract Meaning Representation (Banarescu et al.,2013, AMR) parsers. To train such models, AMR graphs have to be linearized into a one-line text format. While Penman encoding is widely used for this purpose, we argue that it has limitations: 1) for deep graphs, some closely related nodes are located far apart in the linearized text 2) Penman{'}s tree-based encoding necessitates inverse roles to handle node re-entrancy, doubling the number of relation types to predict. To address these issues, we propose a triple-based linearization method and compare its efficiency by training an AMR parser with both approaches. Although triple is well suited to represent a graph, our results show that it does not yet improve performance on deeper or longer graphs. It suggests room for improvement in its design to better compete with Penman{'}s concise representation and explicit encoding of a nested graph structure."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kang-etal-2025-reassessing">
<titleInfo>
<title>Reassessing Graph Linearization for Sequence-to-sequence AMR Parsing: On the Advantages and Limitations of Triple-Based</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jeongwoo</namePart>
<namePart type="family">Kang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maximin</namePart>
<namePart type="family">Coavoux</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Didier</namePart>
<namePart type="family">Schwab</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cédric</namePart>
<namePart type="family">Lopez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The Sixth Workshop on Insights from Negative Results in NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Drozd</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">João</namePart>
<namePart type="family">Sedoc</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shabnam</namePart>
<namePart type="family">Tafreshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arjun</namePart>
<namePart type="family">Akula</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raphael</namePart>
<namePart type="family">Shu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-240-4</identifier>
</relatedItem>
<abstract>Sequence-to-sequence models are widely used to train Abstract Meaning Representation (Banarescu et al.,2013, AMR) parsers. To train such models, AMR graphs have to be linearized into a one-line text format. While Penman encoding is widely used for this purpose, we argue that it has limitations: 1) for deep graphs, some closely related nodes are located far apart in the linearized text 2) Penman’s tree-based encoding necessitates inverse roles to handle node re-entrancy, doubling the number of relation types to predict. To address these issues, we propose a triple-based linearization method and compare its efficiency by training an AMR parser with both approaches. Although triple is well suited to represent a graph, our results show that it does not yet improve performance on deeper or longer graphs. It suggests room for improvement in its design to better compete with Penman’s concise representation and explicit encoding of a nested graph structure.</abstract>
<identifier type="citekey">kang-etal-2025-reassessing</identifier>
<identifier type="doi">10.18653/v1/2025.insights-1.3</identifier>
<location>
<url>https://aclanthology.org/2025.insights-1.3/</url>
</location>
<part>
<date>2025-05</date>
<extent unit="page">
<start>15</start>
<end>23</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reassessing Graph Linearization for Sequence-to-sequence AMR Parsing: On the Advantages and Limitations of Triple-Based
%A Kang, Jeongwoo
%A Coavoux, Maximin
%A Schwab, Didier
%A Lopez, Cédric
%Y Drozd, Aleksandr
%Y Sedoc, João
%Y Tafreshi, Shabnam
%Y Akula, Arjun
%Y Shu, Raphael
%S The Sixth Workshop on Insights from Negative Results in NLP
%D 2025
%8 May
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-240-4
%F kang-etal-2025-reassessing
%X Sequence-to-sequence models are widely used to train Abstract Meaning Representation (AMR; Banarescu et al., 2013) parsers. To train such models, AMR graphs have to be linearized into a one-line text format. While Penman encoding is widely used for this purpose, we argue that it has limitations: 1) for deep graphs, some closely related nodes are located far apart in the linearized text; 2) Penman’s tree-based encoding necessitates inverse roles to handle node re-entrancy, doubling the number of relation types to predict. To address these issues, we propose a triple-based linearization method and compare its efficiency by training an AMR parser with both approaches. Although the triple format is well suited to representing a graph, our results show that it does not yet improve performance on deeper or longer graphs. This suggests room for improvement in its design to better compete with Penman’s concise representation and explicit encoding of a nested graph structure.
%R 10.18653/v1/2025.insights-1.3
%U https://aclanthology.org/2025.insights-1.3/
%U https://doi.org/10.18653/v1/2025.insights-1.3
%P 15-23
Markdown (Informal)
[Reassessing Graph Linearization for Sequence-to-sequence AMR Parsing: On the Advantages and Limitations of Triple-Based](https://aclanthology.org/2025.insights-1.3/) (Kang et al., insights 2025)
ACL
Jeongwoo Kang, Maximin Coavoux, Didier Schwab, and Cédric Lopez. 2025. [Reassessing Graph Linearization for Sequence-to-sequence AMR Parsing: On the Advantages and Limitations of Triple-Based](https://aclanthology.org/2025.insights-1.3/). In *The Sixth Workshop on Insights from Negative Results in NLP*, pages 15–23, Albuquerque, New Mexico. Association for Computational Linguistics.
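
For readers unfamiliar with the two linearizations contrasted in the abstract, the following is a minimal sketch using the open-source `penman` Python library. The whitespace-separated triple serialization shown is a hypothetical illustration of the general idea, not the exact encoding scheme proposed in the paper.

```python
# A minimal sketch, assuming the `penman` package (pip install penman).
# The one-line triple serialization below is a hypothetical format for
# illustration only, not the paper's exact scheme.
import penman

# "The boy wants to go" -- note the re-entrant variable `b`:
# in Penman notation, re-entrancy is expressed by repeating `b`.
PENMAN = "(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-02 :ARG0 b))"

graph = penman.decode(PENMAN)

# Triple-based linearization: emit each (source, role, target) triple
# left to right. A triple can point at any variable directly, so
# re-entrant nodes need no inverse roles (e.g. :ARG0-of).
linearized = " ".join(f"{s} {r} {t}" for s, r, t in graph.triples)
print(linearized)
# -> w :instance want-01 w :ARG0 b b :instance boy w :ARG1 g
#    g :instance go-02 g :ARG0 b
```

The nesting of the Penman string keeps related nodes adjacent but grows deep for large graphs, while the flat triple list treats every edge uniformly at the cost of repeating variable names; this trade-off is what the paper evaluates.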