@inproceedings{moreno-aviles-vaca-2025-extracting,
title = "Extracting Financial Causality through {QA}: Insights from {F}in{C}ausal 2025 {S}panish Subtask",
author = "Moreno Aviles, Marcelo Jose and
Vaca, Alejandro",
editor = "Chen, Chung-Chi and
Moreno-Sandoval, Antonio and
Huang, Jimin and
Xie, Qianqian and
Ananiadou, Sophia and
Chen, Hsin-Hsi",
booktitle = "Proceedings of the Joint Workshop of the 9th Financial Technology and Natural Language Processing (FinNLP), the 6th Financial Narrative Processing (FNP), and the 1st Workshop on Large Language Models for Finance and Legal (LLMFinLegal)",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.finnlp-1.29/",
pages = "265--270",
abstract = "The methodology tested both span extraction and generative tasks, with generative models ultimately proving to be more effective. SuperLenia, a private generative model, was the best-performing model. It is a combination of public models with sizes ranging from 7B to 8B parameters. SuperLenia was fine-tuned using QLoRA in a chat-based framework, and hyperparameter tuned during inference, including adjustments to temperature and sampling, further enhanced its performance."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="moreno-aviles-vaca-2025-extracting">
<titleInfo>
<title>Extracting Financial Causality through QA: Insights from FinCausal 2025 Spanish Subtask</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marcelo</namePart>
<namePart type="given">Jose</namePart>
<namePart type="family">Moreno Aviles</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alejandro</namePart>
<namePart type="family">Vaca</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Joint Workshop of the 9th Financial Technology and Natural Language Processing (FinNLP), the 6th Financial Narrative Processing (FNP), and the 1st Workshop on Large Language Models for Finance and Legal (LLMFinLegal)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chung-Chi</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Antonio</namePart>
<namePart type="family">Moreno-Sandoval</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jimin</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qianqian</namePart>
<namePart type="family">Xie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophia</namePart>
<namePart type="family">Ananiadou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hsin-Hsi</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>The methodology tested both span extraction and generative tasks, with generative models ultimately proving to be more effective. SuperLenia, a private generative model, was the best-performing model. It is a combination of public models with sizes ranging from 7B to 8B parameters. SuperLenia was fine-tuned using QLoRA in a chat-based framework, and hyperparameter tuning during inference, including adjustments to temperature and sampling, further enhanced its performance.</abstract>
    <identifier type="citekey">moreno-aviles-vaca-2025-extracting</identifier>
    <location>
      <url>https://aclanthology.org/2025.finnlp-1.29/</url>
    </location>
    <part>
      <date>2025-01</date>
      <extent unit="page">
        <start>265</start>
        <end>270</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Extracting Financial Causality through QA: Insights from FinCausal 2025 Spanish Subtask
%A Moreno Aviles, Marcelo Jose
%A Vaca, Alejandro
%Y Chen, Chung-Chi
%Y Moreno-Sandoval, Antonio
%Y Huang, Jimin
%Y Xie, Qianqian
%Y Ananiadou, Sophia
%Y Chen, Hsin-Hsi
%S Proceedings of the Joint Workshop of the 9th Financial Technology and Natural Language Processing (FinNLP), the 6th Financial Narrative Processing (FNP), and the 1st Workshop on Large Language Models for Finance and Legal (LLMFinLegal)
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F moreno-aviles-vaca-2025-extracting
%X The methodology tested both span extraction and generative tasks, with generative models ultimately proving to be more effective. SuperLenia, a private generative model, was the best-performing model. It is a combination of public models with sizes ranging from 7B to 8B parameters. SuperLenia was fine-tuned using QLoRA in a chat-based framework, and hyperparameter tuning during inference, including adjustments to temperature and sampling, further enhanced its performance.
%U https://aclanthology.org/2025.finnlp-1.29/
%P 265-270
Markdown (Informal)
[Extracting Financial Causality through QA: Insights from FinCausal 2025 Spanish Subtask](https://aclanthology.org/2025.finnlp-1.29/) (Moreno Aviles & Vaca, FinNLP 2025)
ACL
Marcelo Jose Moreno Aviles and Alejandro Vaca. 2025. Extracting Financial Causality through QA: Insights from FinCausal 2025 Spanish Subtask. In Proceedings of the Joint Workshop of the 9th Financial Technology and Natural Language Processing (FinNLP), the 6th Financial Narrative Processing (FNP), and the 1st Workshop on Large Language Models for Finance and Legal (LLMFinLegal), pages 265–270, Abu Dhabi, UAE. Association for Computational Linguistics.
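
The abstract describes the recipe only at a high level: QLoRA fine-tuning of 7B–8B chat models for the cause/effect QA task, followed by inference-time tuning of temperature and sampling. Below is a minimal, hypothetical sketch of that kind of pipeline using Hugging Face `transformers` and `peft`; the model identifier, prompt wording, LoRA settings, and sampling values are illustrative placeholders, not the authors' private SuperLenia configuration.

```python
# Hypothetical sketch only: placeholder model id, prompt, and hyperparameter values.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

BASE_MODEL = "some-org/some-7b-chat-model"  # placeholder for a public 7B-8B chat model

# QLoRA: load the base model in 4-bit (NF4) and train only low-rank adapters on top.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL, quantization_config=bnb_config, device_map="auto"
)
model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
# ... fine-tune here on chat-formatted (passage, question, cause/effect answer) examples ...

# Inference: pose causality extraction as QA through the chat template and sample the answer.
messages = [
    {
        "role": "user",
        "content": (
            "Texto: <financial passage>\n"
            "Pregunta: <what caused X? / what was the effect of Y?>\n"
            "Responde únicamente con el fragmento exacto del texto."
        ),
    }
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.3,  # example values: swept on the dev split during inference-time tuning
    top_p=0.9,
)
answer = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(answer)
```

In a sketch like this, the "hyperparameter tuning during inference" mentioned in the abstract would amount to sweeping `temperature` and `top_p` (and possibly other sampling settings) over a validation split and keeping the best-scoring combination.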