@inproceedings{zhou-etal-2021-hulk,
    title     = {{HULK}: An Energy Efficiency Benchmark Platform for Responsible Natural Language Processing},
    author    = {Zhou, Xiyou and
                 Chen, Zhiyu and
                 Jin, Xiaoyong and
                 Wang, William Yang},
    editor    = {Gkatzia, Dimitra and
                 Seddah, Djam{\'e}},
    booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations},
    month     = apr,
    year      = {2021},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.eacl-demos.39},
    doi       = {10.18653/v1/2021.eacl-demos.39},
    pages     = {329--336},
    abstract  = {Computation-intensive pretrained models have been taking the lead of many natural language processing benchmarks such as GLUE. However, energy efficiency in the process of model training and inference becomes a critical bottleneck. We introduce HULK, a multi-task energy efficiency benchmarking platform for responsible natural language processing. With HULK, we compare pretrained models{'} energy efficiency from the perspectives of time and cost. Baseline benchmarking results are provided for further analysis. The fine-tuning efficiency of different pretrained models can differ significantly among different tasks, and fewer parameter number does not necessarily imply better efficiency. We analyzed such a phenomenon and demonstrated the method for comparing the multi-task efficiency of pretrained models. Our platform is available at \url{https://hulkbenchmark.github.io/} .},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 record mirroring the BibTeX entry zhou-etal-2021-hulk -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zhou-etal-2021-hulk">
    <titleInfo>
      <title>HULK: An Energy Efficiency Benchmark Platform for Responsible Natural Language Processing</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xiyou</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Zhiyu</namePart>
      <namePart type="family">Chen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xiaoyong</namePart>
      <namePart type="family">Jin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">William</namePart>
      <namePart type="given">Yang</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Dimitra</namePart>
        <namePart type="family">Gkatzia</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Djamé</namePart>
        <namePart type="family">Seddah</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Computation-intensive pretrained models have been taking the lead of many natural language processing benchmarks such as GLUE. However, energy efficiency in the process of model training and inference becomes a critical bottleneck. We introduce HULK, a multi-task energy efficiency benchmarking platform for responsible natural language processing. With HULK, we compare pretrained models’ energy efficiency from the perspectives of time and cost. Baseline benchmarking results are provided for further analysis. The fine-tuning efficiency of different pretrained models can differ significantly among different tasks, and fewer parameter number does not necessarily imply better efficiency. We analyzed such a phenomenon and demonstrated the method for comparing the multi-task efficiency of pretrained models. Our platform is available at https://hulkbenchmark.github.io/ .</abstract>
    <identifier type="citekey">zhou-etal-2021-hulk</identifier>
    <identifier type="doi">10.18653/v1/2021.eacl-demos.39</identifier>
    <location>
      <url>https://aclanthology.org/2021.eacl-demos.39</url>
    </location>
    <part>
      <date>2021-04</date>
      <extent unit="page">
        <start>329</start>
        <end>336</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T HULK: An Energy Efficiency Benchmark Platform for Responsible Natural Language Processing
%A Zhou, Xiyou
%A Chen, Zhiyu
%A Jin, Xiaoyong
%A Wang, William Yang
%Y Gkatzia, Dimitra
%Y Seddah, Djamé
%S Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations
%D 2021
%8 April
%I Association for Computational Linguistics
%C Online
%F zhou-etal-2021-hulk
%X Computation-intensive pretrained models have been taking the lead of many natural language processing benchmarks such as GLUE. However, energy efficiency in the process of model training and inference becomes a critical bottleneck. We introduce HULK, a multi-task energy efficiency benchmarking platform for responsible natural language processing. With HULK, we compare pretrained models’ energy efficiency from the perspectives of time and cost. Baseline benchmarking results are provided for further analysis. The fine-tuning efficiency of different pretrained models can differ significantly among different tasks, and fewer parameter number does not necessarily imply better efficiency. We analyzed such a phenomenon and demonstrated the method for comparing the multi-task efficiency of pretrained models. Our platform is available at https://hulkbenchmark.github.io/ .
%R 10.18653/v1/2021.eacl-demos.39
%U https://aclanthology.org/2021.eacl-demos.39
%U https://doi.org/10.18653/v1/2021.eacl-demos.39
%P 329-336
Markdown (Informal)
[HULK: An Energy Efficiency Benchmark Platform for Responsible Natural Language Processing](https://aclanthology.org/2021.eacl-demos.39) (Zhou et al., EACL 2021)
ACL