@inproceedings{lin-gu-2023-flats,
title = "{FLatS}: Principled Out-of-Distribution Detection with Feature-Based Likelihood Ratio Score",
author = "Lin, Haowei and
Gu, Yuntian",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.emnlp-main.554",
doi = "10.18653/v1/2023.emnlp-main.554",
pages = "8956--8963",
abstract = "Detecting out-of-distribution (OOD) instances is crucial for NLP models in practical applications. Although numerous OOD detection methods exist, most of them are empirical. Backed by theoretical analysis, this paper advocates for the measurement of the {``}OOD-ness{''} of a test case $\boldsymbol{x}$ through the \textit{likelihood ratio} between out-distribution $\mathcal P_{\textit{out}}$ and in-distribution $\mathcal P_{\textit{in}}$. We argue that the state-of-the-art (SOTA) feature-based OOD detection methods, such as Maha and KNN, are suboptimal since they only estimate in-distribution density $p_{\textit{in}}(\boldsymbol{x})$. To address this issue, we propose \textbf{FLATS}, a principled solution for OOD detection based on likelihood ratio. Moreover, we demonstrate that FLATS can serve as a general framework capable of enhancing other OOD detection methods by incorporating out-distribution density $p_{\textit{out}}(\boldsymbol{x})$ estimation. Experiments show that FLATS establishes a new SOTA on popular benchmarks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lin-gu-2023-flats">
<titleInfo>
<title>FLatS: Principled Out-of-Distribution Detection with Feature-Based Likelihood Ratio Score</title>
</titleInfo>
<name type="personal">
<namePart type="given">Haowei</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuntian</namePart>
<namePart type="family">Gu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Detecting out-of-distribution (OOD) instances is crucial for NLP models in practical applications. Although numerous OOD detection methods exist, most of them are empirical. Backed by theoretical analysis, this paper advocates for the measurement of the “OOD-ness” of a test case x through the likelihood ratio between out-distribution P_out and in-distribution P_in. We argue that the state-of-the-art (SOTA) feature-based OOD detection methods, such as Maha and KNN, are suboptimal since they only estimate in-distribution density p_in(x). To address this issue, we propose FLATS, a principled solution for OOD detection based on likelihood ratio. Moreover, we demonstrate that FLATS can serve as a general framework capable of enhancing other OOD detection methods by incorporating out-distribution density p_out(x) estimation. Experiments show that FLATS establishes a new SOTA on popular benchmarks.</abstract>
<identifier type="citekey">lin-gu-2023-flats</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-main.554</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-main.554</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>8956</start>
<end>8963</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T FLatS: Principled Out-of-Distribution Detection with Feature-Based Likelihood Ratio Score
%A Lin, Haowei
%A Gu, Yuntian
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F lin-gu-2023-flats
%X Detecting out-of-distribution (OOD) instances is crucial for NLP models in practical applications. Although numerous OOD detection methods exist, most of them are empirical. Backed by theoretical analysis, this paper advocates for the measurement of the “OOD-ness” of a test case x through the likelihood ratio between out-distribution P_out and in-distribution P_in. We argue that the state-of-the-art (SOTA) feature-based OOD detection methods, such as Maha and KNN, are suboptimal since they only estimate in-distribution density p_in(x). To address this issue, we propose FLATS, a principled solution for OOD detection based on likelihood ratio. Moreover, we demonstrate that FLATS can serve as a general framework capable of enhancing other OOD detection methods by incorporating out-distribution density p_out(x) estimation. Experiments show that FLATS establishes a new SOTA on popular benchmarks.
%R 10.18653/v1/2023.emnlp-main.554
%U https://aclanthology.org/2023.emnlp-main.554
%U https://doi.org/10.18653/v1/2023.emnlp-main.554
%P 8956-8963
Markdown (Informal)
[FLatS: Principled Out-of-Distribution Detection with Feature-Based Likelihood Ratio Score](https://aclanthology.org/2023.emnlp-main.554) (Lin & Gu, EMNLP 2023)
ACL