@inproceedings{hong-etal-2021-fix,
title = "Fix-Filter-Fix: Intuitively Connect Any Models for Effective Bug Fixing",
author = "Hong, Haiwen and
Zhang, Jingfeng and
Zhang, Yin and
Wan, Yao and
Sui, Yulei",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-main.282",
doi = "10.18653/v1/2021.emnlp-main.282",
pages = "3495--3504",
abstract = "Locating and fixing bugs is a time-consuming task. Most neural machine translation (NMT) based approaches for automatically bug fixing lack generality and do not make full use of the rich information in the source code. In NMT-based bug fixing, we find some predicted code identical to the input buggy code (called unchanged fix) in NMT-based approaches due to high similarity between buggy and fixed code (e.g., the difference may only appear in one particular line). Obviously, unchanged fix is not the correct fix because it is the same as the buggy code that needs to be fixed. Based on these, we propose an intuitive yet effective general framework (called Fix-Filter-Fix or F{\^{}}3) for bug fixing. F{\^{}}3 connects models with our filter mechanism to filter out the last model{'}s unchanged fix to the next. We propose an F{\^{}}3 theory that can quantitatively and accurately calculate the F{\^{}}3 lifting effect. To evaluate, we implement the Seq2Seq Transformer (ST) and the AST2Seq Transformer (AT) to form some basic F{\^{}}3 instances, called F{\^{}}3{\_}ST+AT and F{\^{}}3{\_}AT+ST. Comparing them with single model approaches and many model connection baselines across four datasets validates the effectiveness and generality of F{\^{}}3 and corroborates our findings and methodology.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hong-etal-2021-fix">
<titleInfo>
<title>Fix-Filter-Fix: Intuitively Connect Any Models for Effective Bug Fixing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Haiwen</namePart>
<namePart type="family">Hong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jingfeng</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yin</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yao</namePart>
<namePart type="family">Wan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulei</namePart>
<namePart type="family">Sui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marie-Francine</namePart>
<namePart type="family">Moens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucia</namePart>
<namePart type="family">Specia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Scott</namePart>
<namePart type="given">Wen-tau</namePart>
<namePart type="family">Yih</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Locating and fixing bugs is a time-consuming task. Most neural machine translation (NMT) based approaches for automatically bug fixing lack generality and do not make full use of the rich information in the source code. In NMT-based bug fixing, we find some predicted code identical to the input buggy code (called unchanged fix) in NMT-based approaches due to high similarity between buggy and fixed code (e.g., the difference may only appear in one particular line). Obviously, unchanged fix is not the correct fix because it is the same as the buggy code that needs to be fixed. Based on these, we propose an intuitive yet effective general framework (called Fix-Filter-Fix or F³) for bug fixing. F³ connects models with our filter mechanism to filter out the last model’s unchanged fix to the next. We propose an F³ theory that can quantitatively and accurately calculate the F³ lifting effect. To evaluate, we implement the Seq2Seq Transformer (ST) and the AST2Seq Transformer (AT) to form some basic F³ instances, called F³_ST+AT and F³_AT+ST. Comparing them with single model approaches and many model connection baselines across four datasets validates the effectiveness and generality of F³ and corroborates our findings and methodology.</abstract>
<identifier type="citekey">hong-etal-2021-fix</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.282</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.282</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>3495</start>
<end>3504</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Fix-Filter-Fix: Intuitively Connect Any Models for Effective Bug Fixing
%A Hong, Haiwen
%A Zhang, Jingfeng
%A Zhang, Yin
%A Wan, Yao
%A Sui, Yulei
%Y Moens, Marie-Francine
%Y Huang, Xuanjing
%Y Specia, Lucia
%Y Yih, Scott Wen-tau
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F hong-etal-2021-fix
%X Locating and fixing bugs is a time-consuming task. Most neural machine translation (NMT) based approaches for automatically bug fixing lack generality and do not make full use of the rich information in the source code. In NMT-based bug fixing, we find some predicted code identical to the input buggy code (called unchanged fix) in NMT-based approaches due to high similarity between buggy and fixed code (e.g., the difference may only appear in one particular line). Obviously, unchanged fix is not the correct fix because it is the same as the buggy code that needs to be fixed. Based on these, we propose an intuitive yet effective general framework (called Fix-Filter-Fix or F³) for bug fixing. F³ connects models with our filter mechanism to filter out the last model’s unchanged fix to the next. We propose an F³ theory that can quantitatively and accurately calculate the F³ lifting effect. To evaluate, we implement the Seq2Seq Transformer (ST) and the AST2Seq Transformer (AT) to form some basic F³ instances, called F³_ST+AT and F³_AT+ST. Comparing them with single model approaches and many model connection baselines across four datasets validates the effectiveness and generality of F³ and corroborates our findings and methodology.
%R 10.18653/v1/2021.emnlp-main.282
%U https://aclanthology.org/2021.emnlp-main.282
%U https://doi.org/10.18653/v1/2021.emnlp-main.282
%P 3495-3504
Markdown (Informal)
[Fix-Filter-Fix: Intuitively Connect Any Models for Effective Bug Fixing](https://aclanthology.org/2021.emnlp-main.282) (Hong et al., EMNLP 2021)
ACL