@inproceedings{hakimi-parizi-etal-2019-unbnlp,
    title = "{UNBNLP} at {S}em{E}val-2019 Task 5 and 6: Using Language Models to Detect Hate Speech and Offensive Language",
    author = "Hakimi Parizi, Ali and
      King, Milton and
      Cook, Paul",
    editor = "May, Jonathan and
      Shutova, Ekaterina and
      Herbelot, Aurelie and
      Zhu, Xiaodan and
      Apidianaki, Marianna and
      Mohammad, Saif M.",
    booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S19-2092",
    doi = "10.18653/v1/S19-2092",
    pages = "514--518",
    abstract = "In this paper we apply a range of approaches to language modeling {--} including word-level n-gram and neural language models, and character-level neural language models {--} to the problem of detecting hate speech and offensive language. Our findings indicate that language models are able to capture knowledge of whether text is hateful or offensive. However, our findings also indicate that more conventional approaches to text classification often perform similarly or better.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="hakimi-parizi-etal-2019-unbnlp">
    <titleInfo>
      <title>UNBNLP at SemEval-2019 Task 5 and 6: Using Language Models to Detect Hate Speech and Offensive Language</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ali</namePart>
      <namePart type="family">Hakimi Parizi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Milton</namePart>
      <namePart type="family">King</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Paul</namePart>
      <namePart type="family">Cook</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 13th International Workshop on Semantic Evaluation</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jonathan</namePart>
        <namePart type="family">May</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ekaterina</namePart>
        <namePart type="family">Shutova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Aurelie</namePart>
        <namePart type="family">Herbelot</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Xiaodan</namePart>
        <namePart type="family">Zhu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marianna</namePart>
        <namePart type="family">Apidianaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Saif</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Mohammad</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Minneapolis, Minnesota, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper we apply a range of approaches to language modeling – including word-level n-gram and neural language models, and character-level neural language models – to the problem of detecting hate speech and offensive language. Our findings indicate that language models are able to capture knowledge of whether text is hateful or offensive. However, our findings also indicate that more conventional approaches to text classification often perform similarly or better.</abstract>
    <identifier type="citekey">hakimi-parizi-etal-2019-unbnlp</identifier>
    <identifier type="doi">10.18653/v1/S19-2092</identifier>
    <location>
      <url>https://aclanthology.org/S19-2092</url>
    </location>
    <part>
      <date>2019-06</date>
      <extent unit="page">
        <start>514</start>
        <end>518</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T UNBNLP at SemEval-2019 Task 5 and 6: Using Language Models to Detect Hate Speech and Offensive Language
%A Hakimi Parizi, Ali
%A King, Milton
%A Cook, Paul
%Y May, Jonathan
%Y Shutova, Ekaterina
%Y Herbelot, Aurelie
%Y Zhu, Xiaodan
%Y Apidianaki, Marianna
%Y Mohammad, Saif M.
%S Proceedings of the 13th International Workshop on Semantic Evaluation
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota, USA
%F hakimi-parizi-etal-2019-unbnlp
%X In this paper we apply a range of approaches to language modeling – including word-level n-gram and neural language models, and character-level neural language models – to the problem of detecting hate speech and offensive language. Our findings indicate that language models are able to capture knowledge of whether text is hateful or offensive. However, our findings also indicate that more conventional approaches to text classification often perform similarly or better.
%R 10.18653/v1/S19-2092
%U https://aclanthology.org/S19-2092
%U https://doi.org/10.18653/v1/S19-2092
%P 514-518
Markdown (Informal)
[UNBNLP at SemEval-2019 Task 5 and 6: Using Language Models to Detect Hate Speech and Offensive Language](https://aclanthology.org/S19-2092) (Hakimi Parizi et al., SemEval 2019)
ACL

Ali Hakimi Parizi, Milton King, and Paul Cook. 2019. [UNBNLP at SemEval-2019 Task 5 and 6: Using Language Models to Detect Hate Speech and Offensive Language](https://aclanthology.org/S19-2092). In *Proceedings of the 13th International Workshop on Semantic Evaluation*, pages 514–518, Minneapolis, Minnesota, USA. Association for Computational Linguistics.
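
The abstract describes using language models (word-level n-gram, word-level neural, and character-level neural) to detect hateful or offensive text. As a rough sketch of that general idea, class-conditional language-model scoring, and not the paper's actual models, data, or results, the toy Python example below trains a character-bigram language model per class and labels new text by whichever class model assigns the higher average log-probability. All names and example texts here are hypothetical placeholders.

```python
# Toy sketch of class-conditional language-model scoring for text classification.
# Assumes a character-bigram LM with add-one smoothing; not the paper's models.
import math
from collections import defaultdict


class CharBigramLM:
    """Character-level bigram language model with add-one (Laplace) smoothing."""

    def __init__(self):
        self.bigram_counts = defaultdict(lambda: defaultdict(int))
        self.context_counts = defaultdict(int)
        self.vocab = set()

    def train(self, texts):
        for text in texts:
            padded = "^" + text.lower() + "$"  # crude start/end-of-text markers
            for prev, curr in zip(padded, padded[1:]):
                self.bigram_counts[prev][curr] += 1
                self.context_counts[prev] += 1
                self.vocab.update((prev, curr))

    def avg_log_prob(self, text):
        """Average per-character log-probability of the text (higher = better fit)."""
        padded = "^" + text.lower() + "$"
        vocab_size = len(self.vocab) or 1
        total, n = 0.0, 0
        for prev, curr in zip(padded, padded[1:]):
            numerator = self.bigram_counts[prev][curr] + 1
            denominator = self.context_counts[prev] + vocab_size
            total += math.log(numerator / denominator)
            n += 1
        return total / max(n, 1)


def classify(text, lm_by_label):
    """Return the label whose class-specific LM scores the text highest."""
    return max(lm_by_label, key=lambda label: lm_by_label[label].avg_log_prob(text))


if __name__ == "__main__":
    # Hypothetical toy training texts; a real system would use the shared-task corpora.
    train_data = {
        "offensive": ["you are an idiot", "what a stupid take"],
        "not_offensive": ["have a nice day", "thanks for sharing this"],
    }
    lms = {}
    for label, texts in train_data.items():
        lm = CharBigramLM()
        lm.train(texts)
        lms[label] = lm
    print(classify("you are so stupid", lms))
```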