@inproceedings{ulmer-etal-2022-experimental,
    title = "Experimental Standards for Deep Learning in Natural Language Processing Research",
    author = {Ulmer, Dennis and
      Bassignana, Elisa and
      M{\"u}ller-Eberstein, Max and
      Varab, Daniel and
      Zhang, Mike and
      van der Goot, Rob and
      Hardmeier, Christian and
      Plank, Barbara},
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.196",
    doi = "10.18653/v1/2022.findings-emnlp.196",
    pages = "2673--2692",
    abstract = "The field of Deep Learning (DL) has undergone explosive growth during the last decade, with a substantial impact on Natural Language Processing (NLP) as well. Yet, compared to more established disciplines, a lack of common experimental standards remains an open challenge to the field at large. Starting from fundamental scientific principles, we distill ongoing discussions on experimental standards in NLP into a single, widely-applicable methodology. Following these best practices is crucial to strengthen experimental evidence, improve reproducibility and enable scientific progress. These standards are further collected in a public repository to help them transparently adapt to future needs.",
}
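With the BibTeX entry above saved to a bibliography file, the paper can be cited from LaTeX via its citekey. A minimal sketch follows; the file name references.bib and the use of natbib are illustrative assumptions, not part of the record itself:

\documentclass{article}
\usepackage{natbib}  % assumption: natbib for author-year \citet/\citep commands
\begin{document}
Experimental standards for deep learning in NLP are proposed by
\citet{ulmer-etal-2022-experimental}.
\bibliographystyle{plainnat}
\bibliography{references}  % assumption: the entry is stored in references.bib
\end{document}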
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ulmer-etal-2022-experimental">
    <titleInfo>
        <title>Experimental Standards for Deep Learning in Natural Language Processing Research</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Dennis</namePart>
        <namePart type="family">Ulmer</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Elisa</namePart>
        <namePart type="family">Bassignana</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Max</namePart>
        <namePart type="family">Müller-Eberstein</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Daniel</namePart>
        <namePart type="family">Varab</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Mike</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Rob</namePart>
        <namePart type="family">van der Goot</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Christian</namePart>
        <namePart type="family">Hardmeier</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Barbara</namePart>
        <namePart type="family">Plank</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
        </titleInfo>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The field of Deep Learning (DL) has undergone explosive growth during the last decade, with a substantial impact on Natural Language Processing (NLP) as well. Yet, compared to more established disciplines, a lack of common experimental standards remains an open challenge to the field at large. Starting from fundamental scientific principles, we distill ongoing discussions on experimental standards in NLP into a single, widely-applicable methodology. Following these best practices is crucial to strengthen experimental evidence, improve reproducibility and enable scientific progress. These standards are further collected in a public repository to help them transparently adapt to future needs.</abstract>
    <identifier type="citekey">ulmer-etal-2022-experimental</identifier>
    <identifier type="doi">10.18653/v1/2022.findings-emnlp.196</identifier>
    <location>
        <url>https://aclanthology.org/2022.findings-emnlp.196</url>
    </location>
    <part>
        <date>2022-12</date>
        <extent unit="page">
            <start>2673</start>
            <end>2692</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Experimental Standards for Deep Learning in Natural Language Processing Research
%A Ulmer, Dennis
%A Bassignana, Elisa
%A Müller-Eberstein, Max
%A Varab, Daniel
%A Zhang, Mike
%A van der Goot, Rob
%A Hardmeier, Christian
%A Plank, Barbara
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F ulmer-etal-2022-experimental
%X The field of Deep Learning (DL) has undergone explosive growth during the last decade, with a substantial impact on Natural Language Processing (NLP) as well. Yet, compared to more established disciplines, a lack of common experimental standards remains an open challenge to the field at large. Starting from fundamental scientific principles, we distill ongoing discussions on experimental standards in NLP into a single, widely-applicable methodology. Following these best practices is crucial to strengthen experimental evidence, improve reproducibility and enable scientific progress. These standards are further collected in a public repository to help them transparently adapt to future needs.
%R 10.18653/v1/2022.findings-emnlp.196
%U https://aclanthology.org/2022.findings-emnlp.196
%U https://doi.org/10.18653/v1/2022.findings-emnlp.196
%P 2673-2692
Markdown (Informal)
[Experimental Standards for Deep Learning in Natural Language Processing Research](https://aclanthology.org/2022.findings-emnlp.196) (Ulmer et al., Findings 2022)
ACL
Dennis Ulmer, Elisa Bassignana, Max Müller-Eberstein, Daniel Varab, Mike Zhang, Rob van der Goot, Christian Hardmeier, and Barbara Plank. 2022. Experimental Standards for Deep Learning in Natural Language Processing Research. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 2673–2692, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.