@inproceedings{juhasz-etal-2019-tuefact,
title = "{T}ue{F}act at {S}em{E}val 2019 Task 8: Fact checking in community question answering forums: context matters",
author = "Juh{\'a}sz, R{\'e}ka and
Linnenschmidt, Franziska Barbara and
Roys, Teslin",
editor = "May, Jonathan and
Shutova, Ekaterina and
Herbelot, Aurelie and
Zhu, Xiaodan and
Apidianaki, Marianna and
Mohammad, Saif M.",
booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S19-2206",
doi = "10.18653/v1/S19-2206",
pages = "1176--1179",
abstract = "The SemEval 2019 Task 8 on Fact-Checking in community question answering forums aimed to classify questions into categories and verify the correctness of answers given on the QatarLiving public forum. The task was divided into two subtasks: the first classifying the question, the second the answers. The TueFact system described in this paper used different approaches for the two subtasks. Subtask A makes use of word vectors based on a bag-of-word-ngram model using up to trigrams. Predictions are done using multi-class logistic regression. The official SemEval result lists an accuracy of 0.60. Subtask B uses vectorized character n-grams up to trigrams instead. Predictions are done using an LSTM model and achieved an accuracy of 0.53 on the final SemEval Task 8 evaluation set.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="juhasz-etal-2019-tuefact">
<titleInfo>
<title>TueFact at SemEval 2019 Task 8: Fact checking in community question answering forums: context matters</title>
</titleInfo>
<name type="personal">
<namePart type="given">Réka</namePart>
<namePart type="family">Juhász</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Franziska</namePart>
<namePart type="given">Barbara</namePart>
<namePart type="family">Linnenschmidt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Teslin</namePart>
<namePart type="family">Roys</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 13th International Workshop on Semantic Evaluation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jonathan</namePart>
<namePart type="family">May</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aurelie</namePart>
<namePart type="family">Herbelot</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaodan</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saif</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Mohammad</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Minneapolis, Minnesota, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The SemEval 2019 Task 8 on Fact-Checking in community question answering forums aimed to classify questions into categories and verify the correctness of answers given on the QatarLiving public forum. The task was divided into two subtasks: the first classifying the question, the second the answers. The TueFact system described in this paper used different approaches for the two subtasks. Subtask A makes use of word vectors based on a bag-of-word-ngram model using up to trigrams. Predictions are done using multi-class logistic regression. The official SemEval result lists an accuracy of 0.60. Subtask B uses vectorized character n-grams up to trigrams instead. Predictions are done using an LSTM model and achieved an accuracy of 0.53 on the final SemEval Task 8 evaluation set.</abstract>
<identifier type="citekey">juhasz-etal-2019-tuefact</identifier>
<identifier type="doi">10.18653/v1/S19-2206</identifier>
<location>
<url>https://aclanthology.org/S19-2206</url>
</location>
<part>
<date>2019-06</date>
<extent unit="page">
<start>1176</start>
<end>1179</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TueFact at SemEval 2019 Task 8: Fact checking in community question answering forums: context matters
%A Juhász, Réka
%A Linnenschmidt, Franziska Barbara
%A Roys, Teslin
%Y May, Jonathan
%Y Shutova, Ekaterina
%Y Herbelot, Aurelie
%Y Zhu, Xiaodan
%Y Apidianaki, Marianna
%Y Mohammad, Saif M.
%S Proceedings of the 13th International Workshop on Semantic Evaluation
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota, USA
%F juhasz-etal-2019-tuefact
%X The SemEval 2019 Task 8 on Fact-Checking in community question answering forums aimed to classify questions into categories and verify the correctness of answers given on the QatarLiving public forum. The task was divided into two subtasks: the first classifying the question, the second the answers. The TueFact system described in this paper used different approaches for the two subtasks. Subtask A makes use of word vectors based on a bag-of-word-ngram model using up to trigrams. Predictions are done using multi-class logistic regression. The official SemEval result lists an accuracy of 0.60. Subtask B uses vectorized character n-grams up to trigrams instead. Predictions are done using an LSTM model and achieved an accuracy of 0.53 on the final SemEval Task 8 evaluation set.
%R 10.18653/v1/S19-2206
%U https://aclanthology.org/S19-2206
%U https://doi.org/10.18653/v1/S19-2206
%P 1176-1179
Markdown (Informal)
[TueFact at SemEval 2019 Task 8: Fact checking in community question answering forums: context matters](https://aclanthology.org/S19-2206) (Juhász et al., SemEval 2019)
ACL