BibTeX
@inproceedings{schmaltz-2018-utility,
    title = "On the Utility of Lay Summaries and {AI} Safety Disclosures: Toward Robust, Open Research Oversight",
    author = "Schmaltz, Allen",
    editor = "Alfano, Mark and
      Hovy, Dirk and
      Mitchell, Margaret and
      Strube, Michael",
    booktitle = "Proceedings of the Second {ACL} Workshop on Ethics in Natural Language Processing",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-0801",
    doi = "10.18653/v1/W18-0801",
    pages = "1--6",
    abstract = "In this position paper, we propose that the community consider encouraging researchers to include two riders, a {``}Lay Summary{''} and an {``}AI Safety Disclosure{''}, as part of future NLP papers published in ACL forums that present user-facing systems. The goal is to encourage researchers{--}via a relatively non-intrusive mechanism{--}to consider the societal implications of technologies carrying (un)known and/or (un)knowable long-term risks, to highlight failure cases, and to provide a mechanism by which the general public (and scientists in other disciplines) can more readily engage in the discussion in an informed manner. This simple proposal requires minimal additional up-front costs for researchers; the lay summary, at least, has significant precedence in the medical literature and other areas of science; and the proposal is aimed to supplement, rather than replace, existing approaches for encouraging researchers to consider the ethical implications of their work, such as those of the Collaborative Institutional Training Initiative (CITI) Program and institutional review boards (IRBs).",
}
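Entries like this are usually consumed by tooling rather than read directly. Below is a minimal sketch of loading the record in Python, assuming the third-party bibtexparser package (v1 API) and an abbreviated inline copy of the entry; the package choice and the trimming are assumptions of this sketch, not part of the record:

```python
# Minimal sketch: parse the BibTeX record with the third-party
# bibtexparser package (v1 API). The string below is an abbreviated
# copy of the record above (the month/editor/abstract fields are
# omitted purely to keep the example short).
import bibtexparser

record = """
@inproceedings{schmaltz-2018-utility,
    title = "On the Utility of Lay Summaries and {AI} Safety Disclosures: Toward Robust, Open Research Oversight",
    author = "Schmaltz, Allen",
    year = "2018",
    pages = "1--6",
}
"""

db = bibtexparser.loads(record)   # returns a BibDatabase
entry = db.entries[0]             # each entry is a plain dict
print(entry["ID"])                # schmaltz-2018-utility
print(entry["ENTRYTYPE"])         # inproceedings
print(entry["pages"])             # 1--6
```

The citekey (the `ID` field) is stable across the formats on this page, so it makes a convenient lookup key in downstream code.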
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="schmaltz-2018-utility">
<titleInfo>
<title>On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight</title>
</titleInfo>
<name type="personal">
<namePart type="given">Allen</namePart>
<namePart type="family">Schmaltz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second ACL Workshop on Ethics in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mark</namePart>
<namePart type="family">Alfano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dirk</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margaret</namePart>
<namePart type="family">Mitchell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Strube</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">New Orleans, Louisiana, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this position paper, we propose that the community consider encouraging researchers to include two riders, a “Lay Summary” and an “AI Safety Disclosure”, as part of future NLP papers published in ACL forums that present user-facing systems. The goal is to encourage researchers–via a relatively non-intrusive mechanism–to consider the societal implications of technologies carrying (un)known and/or (un)knowable long-term risks, to highlight failure cases, and to provide a mechanism by which the general public (and scientists in other disciplines) can more readily engage in the discussion in an informed manner. This simple proposal requires minimal additional up-front costs for researchers; the lay summary, at least, has significant precedence in the medical literature and other areas of science; and the proposal is aimed to supplement, rather than replace, existing approaches for encouraging researchers to consider the ethical implications of their work, such as those of the Collaborative Institutional Training Initiative (CITI) Program and institutional review boards (IRBs).</abstract>
<identifier type="citekey">schmaltz-2018-utility</identifier>
<identifier type="doi">10.18653/v1/W18-0801</identifier>
<location>
<url>https://aclanthology.org/W18-0801</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>1</start>
<end>6</end>
</extent>
</part>
</mods>
</modsCollection>
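No third-party dependency is needed to read the MODS record: the standard library's ElementTree handles it once the MODS v3 namespace is supplied in queries. A minimal sketch over a trimmed copy of the record above (the trimming is an assumption of the sketch; note that ET.fromstring rejects str input that still carries an encoding declaration, so the declaration is dropped here):

```python
# Minimal sketch: extract fields from the MODS record with the standard
# library. The string is a trimmed copy of the record above; the
# <?xml ... encoding="UTF-8"?> declaration is omitted because
# ET.fromstring() refuses str input containing an encoding declaration.
import xml.etree.ElementTree as ET

mods_xml = """\
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="schmaltz-2018-utility">
    <titleInfo>
      <title>On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Allen</namePart>
      <namePart type="family">Schmaltz</namePart>
    </name>
    <identifier type="doi">10.18653/v1/W18-0801</identifier>
  </mods>
</modsCollection>"""

NS = {"m": "http://www.loc.gov/mods/v3"}  # MODS v3 namespace from the record
mods = ET.fromstring(mods_xml).find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
doi = mods.findtext('m:identifier[@type="doi"]', namespaces=NS)
author = " ".join(p.text for p in mods.findall("m:name/m:namePart", NS))
print(title)
print(author)  # Allen Schmaltz
print(doi)     # 10.18653/v1/W18-0801
```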
Endnote
%0 Conference Proceedings
%T On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight
%A Schmaltz, Allen
%Y Alfano, Mark
%Y Hovy, Dirk
%Y Mitchell, Margaret
%Y Strube, Michael
%S Proceedings of the Second ACL Workshop on Ethics in Natural Language Processing
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana, USA
%F schmaltz-2018-utility
%X In this position paper, we propose that the community consider encouraging researchers to include two riders, a “Lay Summary” and an “AI Safety Disclosure”, as part of future NLP papers published in ACL forums that present user-facing systems. The goal is to encourage researchers–via a relatively non-intrusive mechanism–to consider the societal implications of technologies carrying (un)known and/or (un)knowable long-term risks, to highlight failure cases, and to provide a mechanism by which the general public (and scientists in other disciplines) can more readily engage in the discussion in an informed manner. This simple proposal requires minimal additional up-front costs for researchers; the lay summary, at least, has significant precedence in the medical literature and other areas of science; and the proposal is aimed to supplement, rather than replace, existing approaches for encouraging researchers to consider the ethical implications of their work, such as those of the Collaborative Institutional Training Initiative (CITI) Program and institutional review boards (IRBs).
%R 10.18653/v1/W18-0801
%U https://aclanthology.org/W18-0801
%U https://doi.org/10.18653/v1/W18-0801
%P 1-6
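The Endnote/refer export is line-oriented: each line is a percent tag, a space, and a value, and tags such as %A, %Y, and %U may repeat. A minimal hand-rolled sketch, again over an abbreviated copy of the record:

```python
# Minimal sketch: fold the %-tagged (Endnote/refer) record into a dict of
# lists, since tags such as %A, %Y, and %U may repeat. The string is an
# abbreviated copy of the record above.
from collections import defaultdict

refer = """\
%0 Conference Proceedings
%T On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight
%A Schmaltz, Allen
%Y Alfano, Mark
%Y Hovy, Dirk
%D 2018
%P 1-6"""

fields = defaultdict(list)
for line in refer.splitlines():
    tag, _, value = line.partition(" ")
    fields[tag].append(value)

print(fields["%T"][0])  # the title
print(fields["%Y"])     # ['Alfano, Mark', 'Hovy, Dirk']
```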
Markdown (Informal)
[On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight](https://aclanthology.org/W18-0801) (Schmaltz, EthNLP 2018)
ACL
Allen Schmaltz. 2018. [On the Utility of Lay Summaries and AI Safety Disclosures: Toward Robust, Open Research Oversight](https://aclanthology.org/W18-0801). In *Proceedings of the Second ACL Workshop on Ethics in Natural Language Processing*, pages 1–6, New Orleans, Louisiana, USA. Association for Computational Linguistics.