@inproceedings{makhija-etal-2025-fractal,
title = "{FRACTAL}: Fine-Grained Scoring from Aggregate Text Labels",
author = "Makhija, Yukti and
Agrawal, Priyanka and
Saket, Rishi and
Raghuveer, Aravindan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-long.822/",
doi = "10.18653/v1/2025.acl-long.822",
pages = "16810--16830",
ISBN = "979-8-89176-251-0",
abstract = "Fine-Tuning of LLMs using RLHF / RLAIF has been shown as a critical step to improve the performance of LLMs in complex generation tasks. These methods typically use response-level human or model feedback for alignment. Recent works indicate that finer sentence or span-level labels provide more accurate and interpretable feedback for LLM optimization. In this work, we propose FRACTAL, a suite of models to disaggregate response-level labels into sentence-level (pseudo-)labels through Multiple Instance Learning (MIL) and Learning from Label Proportions (LLP) formulations, novel usage of prior information, and maximum likelihood calibration. We perform close to 2000 experiments across 6 datasets and 4 tasks that show that FRACTAL can reach up to 93{\%} of the performance of the fully supervised baseline while requiring only around 10{\%} of the gold labels. Furthermore, in a downstream eval, employing step-level pseudo scores in RLHF for a math reasoning task leads to 5{\%} absolute improvement in performance. Our work is the first to develop response-level feedback to sentence-level scoring techniques leveraging sentence-level prior information, along with comprehensive evaluations on multiple tasks as well as end-to-end finetuning evaluations."
}
Markdown (Informal):
[FRACTAL: Fine-Grained Scoring from Aggregate Text Labels](https://aclanthology.org/2025.acl-long.822/) (Makhija et al., ACL 2025)

ACL:
Yukti Makhija, Priyanka Agrawal, Rishi Saket, and Aravindan Raghuveer. 2025. FRACTAL: Fine-Grained Scoring from Aggregate Text Labels. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16810–16830, Vienna, Austria. Association for Computational Linguistics.