@inproceedings{utescher-2019-visual,
title = "Visual {TTR} - Modelling Visual Question Answering in Type Theory with Records",
author = "Utescher, Ronja",
editor = "Dobnik, Simon and
Chatzikyriakidis, Stergios and
Demberg, Vera and
Abu Kwaik, Kathrein and
Maraev, Vladislav",
booktitle = "Proceedings of the 13th International Conference on Computational Semantics - Student Papers",
month = may,
year = "2019",
address = "Gothenburg, Sweden",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-0602",
doi = "10.18653/v1/W19-0602",
pages = "9--14",
abstract = "In this paper, I will describe a system that was developed for the task of Visual Question Answering. The system uses the rich type universe of Type Theory with Records (TTR) to model both the utterances about the image, the image itself and classifications made related to the two. At its most basic, the decision of whether any given predicate can be assigned to an object in the image is delegated to a CNN. Consequently, images can be judged as evidence for propositions. The end result is a model whose application of perceptual classifiers to a given image is guided by the accompanying utterance.",
}
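
The abstract describes a concrete mechanism: a TTR typing judgement ("is this image object a witness of this perceptual type?") is delegated to a CNN classifier, and the utterance determines which classifiers are applied to the image. Below is a minimal sketch of that loop, assuming a Python setting; the names (ImageRegion, PType, CLASSIFIERS, judge, answer), the stubbed classifier scores, and the thresholding are illustrative assumptions, not the paper's actual implementation or API.

"""Hedged sketch of the utterance-guided TTR-plus-CNN judgement loop
described in the abstract. All names and scores here are illustrative
assumptions, not the paper's code."""

from dataclasses import dataclass
from typing import Callable, Dict, List


@dataclass
class ImageRegion:
    """A detected object in the image (stand-in for real pixel data)."""
    region_id: str
    features: List[float]  # e.g. a CNN feature vector for the crop


@dataclass
class PType:
    """A TTR-style perceptual type such as dog(x) or red(x)."""
    predicate: str


# Hypothetical registry mapping predicates to CNN-backed classifiers.
# Each classifier returns a confidence in [0, 1] for a region; a real
# system would run a trained CNN here instead of a stub.
Classifier = Callable[[ImageRegion], float]
CLASSIFIERS: Dict[str, Classifier] = {
    "dog": lambda r: 0.91,
    "red": lambda r: 0.12,
}


def judge(region: ImageRegion, ptype: PType, threshold: float = 0.5) -> bool:
    """Judge whether `region` is a witness of `ptype`.

    The typing judgement is delegated to a perceptual classifier, so
    the image serves as evidence for the proposition.
    """
    score = CLASSIFIERS[ptype.predicate](region)
    return score >= threshold


def answer(utterance_predicates: List[str], regions: List[ImageRegion]) -> bool:
    """Utterance-guided classification: only the predicates mentioned
    in the question decide which classifiers run over the image."""
    return any(
        all(judge(r, PType(p)) for p in utterance_predicates)
        for r in regions
    )


# "Is there a dog?" -> run only the `dog` classifier over the regions.
regions = [ImageRegion("r1", [0.0])]
print(answer(["dog"], regions))  # True with the stub scores above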