@inproceedings{zhunis-chuang-2024-challenges,
title = "Challenges at {S}em{E}val 2024 Task 7: Contrastive Learning Approach on Numeral-Aware Language Generation",
author = "Zhunis, Ali and
Chuang, Hao-yun",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Tayyar Madabushi, Harish and
Da San Martino, Giovanni and
Rosenthal, Sara and
Ros{\'a}, Aiala},
booktitle = "Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.semeval-1.236",
doi = "10.18653/v1/2024.semeval-1.236",
pages = "1659--1662",
abstract = "Although Large Language Model (LLM) excels on generating headline on ROUGE evaluation, it still fails to reason number and generate news article headline with accurate number. Attending SemEval-2024 Task 7 subtask 3, our team aims on using contrastive loss to increase the understanding of the number from their different expression, and knows to identify between different number and its respective expression. This system description paper uses T5 and BART as the baseline model, comparing its result with and without the constrative loss. The result shows that BART with contrastive loss have excelled all the models, and its performance on the number accuracy has the highest performance among all.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhunis-chuang-2024-challenges">
<titleInfo>
<title>Challenges at SemEval 2024 Task 7: Contrastive Learning Approach on Numeral-Aware Language Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ali</namePart>
<namePart type="family">Zhunis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hao-yun</namePart>
<namePart type="family">Chuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Atul</namePart>
<namePart type="given">Kr.</namePart>
<namePart type="family">Ojha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">A</namePart>
<namePart type="given">Seza</namePart>
<namePart type="family">Doğruöz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Harish</namePart>
<namePart type="family">Tayyar Madabushi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Giovanni</namePart>
<namePart type="family">Da San Martino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="family">Rosenthal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aiala</namePart>
<namePart type="family">Rosá</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Although Large Language Models (LLMs) excel at headline generation as measured by ROUGE, they still struggle to reason about numbers and often generate news headlines with inaccurate numerals. For SemEval-2024 Task 7 subtask 3, our team uses a contrastive loss to improve the model's understanding of a number across its different surface expressions and to help it distinguish between different numbers and their respective expressions. This system description paper uses T5 and BART as baseline models and compares their results with and without the contrastive loss. The results show that BART with contrastive loss outperforms all the other models and achieves the highest number accuracy.</abstract>
<identifier type="citekey">zhunis-chuang-2024-challenges</identifier>
<identifier type="doi">10.18653/v1/2024.semeval-1.236</identifier>
<location>
<url>https://aclanthology.org/2024.semeval-1.236</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>1659</start>
<end>1662</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Challenges at SemEval 2024 Task 7: Contrastive Learning Approach on Numeral-Aware Language Generation
%A Zhunis, Ali
%A Chuang, Hao-yun
%Y Ojha, Atul Kr.
%Y Doğruöz, A. Seza
%Y Tayyar Madabushi, Harish
%Y Da San Martino, Giovanni
%Y Rosenthal, Sara
%Y Rosá, Aiala
%S Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F zhunis-chuang-2024-challenges
%X Although Large Language Models (LLMs) excel at headline generation as measured by ROUGE, they still struggle to reason about numbers and often generate news headlines with inaccurate numerals. For SemEval-2024 Task 7 subtask 3, our team uses a contrastive loss to improve the model's understanding of a number across its different surface expressions and to help it distinguish between different numbers and their respective expressions. This system description paper uses T5 and BART as baseline models and compares their results with and without the contrastive loss. The results show that BART with contrastive loss outperforms all the other models and achieves the highest number accuracy.
%R 10.18653/v1/2024.semeval-1.236
%U https://aclanthology.org/2024.semeval-1.236
%U https://doi.org/10.18653/v1/2024.semeval-1.236
%P 1659-1662
Markdown (Informal)
[Challenges at SemEval 2024 Task 7: Contrastive Learning Approach on Numeral-Aware Language Generation](https://aclanthology.org/2024.semeval-1.236) (Zhunis & Chuang, SemEval 2024)
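The abstract above describes adding a contrastive objective on top of seq2seq fine-tuning so that different surface forms of the same number (e.g. "7 billion" vs. "7,000,000,000") are pulled together in embedding space while different numbers are pushed apart. As a rough illustration only, the following is a minimal sketch of such an auxiliary InfoNCE loss over pooled BART encoder embeddings; the model checkpoint, number pairings, mean pooling, and temperature are all assumptions for illustration, not the authors' actual implementation.

import torch
import torch.nn.functional as F
from transformers import BartTokenizer, BartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")

def encode(texts):
    # Mean-pool the encoder's final hidden states into one vector per text.
    batch = tokenizer(texts, padding=True, return_tensors="pt")
    hidden = model.get_encoder()(**batch).last_hidden_state
    mask = batch["attention_mask"].unsqueeze(-1).float()
    return (hidden * mask).sum(dim=1) / mask.sum(dim=1)

def contrastive_loss(anchors, positives, temperature=0.1):
    # InfoNCE: each anchor's matching positive (same number, different
    # surface form) sits on the diagonal of the similarity matrix;
    # every other pair in the batch acts as a negative.
    a = F.normalize(encode(anchors), dim=-1)
    p = F.normalize(encode(positives), dim=-1)
    logits = a @ p.T / temperature
    targets = torch.arange(len(anchors))
    return F.cross_entropy(logits, targets)

# Toy batch: position i in each list holds two expressions of the same number.
loss = contrastive_loss(
    ["7 billion", "around 25 percent", "a dozen"],
    ["7,000,000,000", "roughly 25%", "12"],
)
print(f"contrastive loss: {loss.item():.4f}")
# During fine-tuning, this auxiliary term would typically be added to the
# standard generation cross-entropy, e.g. total = ce_loss + lam * loss,
# where lam is a weighting hyperparameter.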