@inproceedings{aftab-malik-2022-erock,
    title     = {e{R}ock at Qur{'}an {QA} 2022: Contemporary Deep Neural Networks for Qur{'}an based Reading Comprehension Question Answers},
    author    = {Aftab, Esha and
                 Malik, Muhammad Kamran},
    editor    = {Al-Khalifa, Hend and
                 Elsayed, Tamer and
                 Mubarak, Hamdy and
                 Al-Thubaity, Abdulmohsen and
                 Magdy, Walid and
                 Darwish, Kareem},
    booktitle = {Proceedings of the 5th Workshop on Open-Source Arabic Corpora and Processing Tools with Shared Tasks on Qur'an QA and Fine-Grained Hate Speech Detection},
    month     = jun,
    year      = {2022},
    address   = {Marseille, France},
    publisher = {European Language Resources Association},
    url       = {https://aclanthology.org/2022.osact-1.11},
    pages     = {96--103},
    abstract  = {Question Answering (QA) has enticed the interest of NLP community in recent years. NLP enthusiasts are engineering new Models and fine-tuning the existing ones that can give out answers for the posed questions. The deep neural network models are found to perform exceptionally on QA tasks, but these models are also data intensive. For instance, BERT has outperformed many of its contemporary contenders on SQuAD dataset. In this work, we attempt at solving the closed domain reading comprehension Question Answering task on QRCD (Qur{'}anic Reading Comprehension Dataset) to extract an answer span from the provided passage, using BERT as a baseline model. We improved the model{'}s output by applying regularization techniques like weight-decay and data augmentation. Using different strategies we had 0.59{\%} and 0.31{\%} partial Reciprocal Ranking (pRR) on development and testing data splits respectively.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="aftab-malik-2022-erock">
<titleInfo>
<title>eRock at Qur’an QA 2022: Contemporary Deep Neural Networks for Qur’an based Reading Comprehension Question Answers</title>
</titleInfo>
<name type="personal">
<namePart type="given">Esha</namePart>
<namePart type="family">Aftab</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Muhammad</namePart>
<namePart type="given">Kamran</namePart>
<namePart type="family">Malik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 5th Workshop on Open-Source Arabic Corpora and Processing Tools with Shared Tasks on Qur’an QA and Fine-Grained Hate Speech Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tamer</namePart>
<namePart type="family">Elsayed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hamdy</namePart>
<namePart type="family">Mubarak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abdulmohsen</namePart>
<namePart type="family">Al-Thubaity</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Walid</namePart>
<namePart type="family">Magdy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kareem</namePart>
<namePart type="family">Darwish</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>European Language Resources Association</publisher>
<place>
<placeTerm type="text">Marseille, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Question Answering (QA) has enticed the interest of NLP community in recent years. NLP enthusiasts are engineering new Models and fine-tuning the existing ones that can give out answers for the posed questions. The deep neural network models are found to perform exceptionally on QA tasks, but these models are also data intensive. For instance, BERT has outperformed many of its contemporary contenders on SQuAD dataset. In this work, we attempt at solving the closed domain reading comprehension Question Answering task on QRCD (Qur’anic Reading Comprehension Dataset) to extract an answer span from the provided passage, using BERT as a baseline model. We improved the model’s output by applying regularization techniques like weight-decay and data augmentation. Using different strategies we had 0.59% and 0.31% partial Reciprocal Ranking (pRR) on development and testing data splits respectively.</abstract>
<identifier type="citekey">aftab-malik-2022-erock</identifier>
<location>
<url>https://aclanthology.org/2022.osact-1.11</url>
</location>
<part>
<date>2022-06</date>
<extent unit="page">
<start>96</start>
<end>103</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T eRock at Qur’an QA 2022: Contemporary Deep Neural Networks for Qur’an based Reading Comprehension Question Answers
%A Aftab, Esha
%A Malik, Muhammad Kamran
%Y Al-Khalifa, Hend
%Y Elsayed, Tamer
%Y Mubarak, Hamdy
%Y Al-Thubaity, Abdulmohsen
%Y Magdy, Walid
%Y Darwish, Kareem
%S Proceedings of the 5th Workshop on Open-Source Arabic Corpora and Processing Tools with Shared Tasks on Qur’an QA and Fine-Grained Hate Speech Detection
%D 2022
%8 June
%I European Language Resources Association
%C Marseille, France
%F aftab-malik-2022-erock
%X Question Answering (QA) has enticed the interest of NLP community in recent years. NLP enthusiasts are engineering new Models and fine-tuning the existing ones that can give out answers for the posed questions. The deep neural network models are found to perform exceptionally on QA tasks, but these models are also data intensive. For instance, BERT has outperformed many of its contemporary contenders on SQuAD dataset. In this work, we attempt at solving the closed domain reading comprehension Question Answering task on QRCD (Qur’anic Reading Comprehension Dataset) to extract an answer span from the provided passage, using BERT as a baseline model. We improved the model’s output by applying regularization techniques like weight-decay and data augmentation. Using different strategies we had 0.59% and 0.31% partial Reciprocal Ranking (pRR) on development and testing data splits respectively.
%U https://aclanthology.org/2022.osact-1.11
%P 96-103
Markdown (Informal)
[eRock at Qur’an QA 2022: Contemporary Deep Neural Networks for Qur’an based Reading Comprehension Question Answers](https://aclanthology.org/2022.osact-1.11) (Aftab & Malik, OSACT 2022)
ACL