@inproceedings{fraser-etal-2022-moral,
    title = "Does Moral Code have a Moral Code? Probing Delphi{'}s Moral Philosophy",
    author = "Fraser, Kathleen C. and
      Kiritchenko, Svetlana and
      Balkir, Esma",
    editor = "Verma, Apurv and
      Pruksachatkun, Yada and
      Chang, Kai-Wei and
      Galstyan, Aram and
      Dhamala, Jwala and
      Cao, Yang Trista",
    booktitle = "Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)",
    month = jul,
    year = "2022",
    address = "Seattle, U.S.A.",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.trustnlp-1.3",
    doi = "10.18653/v1/2022.trustnlp-1.3",
    pages = "26--42",
    abstract = "In an effort to guarantee that machine learning model outputs conform with human moral values, recent work has begun exploring the possibility of explicitly training models to learn the difference between right and wrong. This is typically done in a bottom-up fashion, by exposing the model to different scenarios, annotated with human moral judgements. One question, however, is whether the trained models actually learn any consistent, higher-level ethical principles from these datasets {--} and if so, what? Here, we probe the Allen AI Delphi model with a set of standardized morality questionnaires, and find that, despite some inconsistencies, Delphi tends to mirror the moral principles associated with the demographic groups involved in the annotation process. We question whether this is desirable and discuss how we might move forward with this knowledge.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="fraser-etal-2022-moral">
    <titleInfo>
        <title>Does Moral Code have a Moral Code? Probing Delphi’s Moral Philosophy</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Kathleen</namePart>
        <namePart type="given">C</namePart>
        <namePart type="family">Fraser</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Svetlana</namePart>
        <namePart type="family">Kiritchenko</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Esma</namePart>
        <namePart type="family">Balkir</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Apurv</namePart>
            <namePart type="family">Verma</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yada</namePart>
            <namePart type="family">Pruksachatkun</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Kai-Wei</namePart>
            <namePart type="family">Chang</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Aram</namePart>
            <namePart type="family">Galstyan</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Jwala</namePart>
            <namePart type="family">Dhamala</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yang</namePart>
            <namePart type="given">Trista</namePart>
            <namePart type="family">Cao</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Seattle, U.S.A.</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In an effort to guarantee that machine learning model outputs conform with human moral values, recent work has begun exploring the possibility of explicitly training models to learn the difference between right and wrong. This is typically done in a bottom-up fashion, by exposing the model to different scenarios, annotated with human moral judgements. One question, however, is whether the trained models actually learn any consistent, higher-level ethical principles from these datasets – and if so, what? Here, we probe the Allen AI Delphi model with a set of standardized morality questionnaires, and find that, despite some inconsistencies, Delphi tends to mirror the moral principles associated with the demographic groups involved in the annotation process. We question whether this is desirable and discuss how we might move forward with this knowledge.</abstract>
    <identifier type="citekey">fraser-etal-2022-moral</identifier>
    <identifier type="doi">10.18653/v1/2022.trustnlp-1.3</identifier>
    <location>
        <url>https://aclanthology.org/2022.trustnlp-1.3</url>
    </location>
    <part>
        <date>2022-07</date>
        <extent unit="page">
            <start>26</start>
            <end>42</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Does Moral Code have a Moral Code? Probing Delphi’s Moral Philosophy
%A Fraser, Kathleen C.
%A Kiritchenko, Svetlana
%A Balkir, Esma
%Y Verma, Apurv
%Y Pruksachatkun, Yada
%Y Chang, Kai-Wei
%Y Galstyan, Aram
%Y Dhamala, Jwala
%Y Cao, Yang Trista
%S Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, U.S.A.
%F fraser-etal-2022-moral
%X In an effort to guarantee that machine learning model outputs conform with human moral values, recent work has begun exploring the possibility of explicitly training models to learn the difference between right and wrong. This is typically done in a bottom-up fashion, by exposing the model to different scenarios, annotated with human moral judgements. One question, however, is whether the trained models actually learn any consistent, higher-level ethical principles from these datasets – and if so, what? Here, we probe the Allen AI Delphi model with a set of standardized morality questionnaires, and find that, despite some inconsistencies, Delphi tends to mirror the moral principles associated with the demographic groups involved in the annotation process. We question whether this is desirable and discuss how we might move forward with this knowledge.
%R 10.18653/v1/2022.trustnlp-1.3
%U https://aclanthology.org/2022.trustnlp-1.3
%U https://doi.org/10.18653/v1/2022.trustnlp-1.3
%P 26-42
Markdown (Informal)
[Does Moral Code have a Moral Code? Probing Delphi’s Moral Philosophy](https://aclanthology.org/2022.trustnlp-1.3) (Fraser et al., TrustNLP 2022)
ACL
Kathleen C. Fraser, Svetlana Kiritchenko, and Esma Balkir. 2022. [Does Moral Code have a Moral Code? Probing Delphi’s Moral Philosophy](https://aclanthology.org/2022.trustnlp-1.3). In *Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)*, pages 26–42, Seattle, U.S.A. Association for Computational Linguistics.