@inproceedings{thomas-etal-2024-ltrc-iiith,
title = "{LTRC}-{IIITH} at {MEDIQA}-{M}3{G} 2024: Medical Visual Question Answering with Vision-Language Models",
author = "Thomas, Jerrin and
Marimuthu, Sushvin and
Krishnamurthy, Parameswari",
editor = "Naumann, Tristan and
Ben Abacha, Asma and
Bethard, Steven and
Roberts, Kirk and
Bitterman, Danielle",
booktitle = "Proceedings of the 6th Clinical Natural Language Processing Workshop",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.clinicalnlp-1.67",
doi = "10.18653/v1/2024.clinicalnlp-1.67",
pages = "703--707",
abstract = "In this paper, we present our work to the MEDIQA-M3G 2024 shared task, which tackles multilingual and multimodal medical answer generation. Our system consists of a lightweight Vision-and-Language Transformer (ViLT) model which is fine-tuned for the clinical dermatology visual question-answering task. In the official leaderboard for the task, our system ranks 6th. After the challenge, we experiment with training the ViLT model on more data. We also explore the capabilities of large Vision-Language Models (VLMs) such as Gemini and LLaVA.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="thomas-etal-2024-ltrc-iiith">
    <titleInfo>
      <title>LTRC-IIITH at MEDIQA-M3G 2024: Medical Visual Question Answering with Vision-Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jerrin</namePart>
      <namePart type="family">Thomas</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sushvin</namePart>
      <namePart type="family">Marimuthu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Parameswari</namePart>
      <namePart type="family">Krishnamurthy</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 6th Clinical Natural Language Processing Workshop</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Tristan</namePart>
        <namePart type="family">Naumann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Asma</namePart>
        <namePart type="family">Ben Abacha</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kirk</namePart>
        <namePart type="family">Roberts</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Danielle</namePart>
        <namePart type="family">Bitterman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Mexico City, Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we present our submission to the MEDIQA-M3G 2024 shared task, which tackles multilingual and multimodal medical answer generation. Our system consists of a lightweight Vision-and-Language Transformer (ViLT) model that is fine-tuned for the clinical dermatology visual question-answering task. On the official leaderboard for the task, our system ranks 6th. After the challenge, we experiment with training the ViLT model on more data. We also explore the capabilities of large Vision-Language Models (VLMs) such as Gemini and LLaVA.</abstract>
<identifier type="citekey">thomas-etal-2024-ltrc-iiith</identifier>
<identifier type="doi">10.18653/v1/2024.clinicalnlp-1.67</identifier>
<location>
<url>https://aclanthology.org/2024.clinicalnlp-1.67</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>703</start>
<end>707</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LTRC-IIITH at MEDIQA-M3G 2024: Medical Visual Question Answering with Vision-Language Models
%A Thomas, Jerrin
%A Marimuthu, Sushvin
%A Krishnamurthy, Parameswari
%Y Naumann, Tristan
%Y Ben Abacha, Asma
%Y Bethard, Steven
%Y Roberts, Kirk
%Y Bitterman, Danielle
%S Proceedings of the 6th Clinical Natural Language Processing Workshop
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F thomas-etal-2024-ltrc-iiith
%X In this paper, we present our submission to the MEDIQA-M3G 2024 shared task, which tackles multilingual and multimodal medical answer generation. Our system consists of a lightweight Vision-and-Language Transformer (ViLT) model that is fine-tuned for the clinical dermatology visual question-answering task. On the official leaderboard for the task, our system ranks 6th. After the challenge, we experiment with training the ViLT model on more data. We also explore the capabilities of large Vision-Language Models (VLMs) such as Gemini and LLaVA.
%R 10.18653/v1/2024.clinicalnlp-1.67
%U https://aclanthology.org/2024.clinicalnlp-1.67
%U https://doi.org/10.18653/v1/2024.clinicalnlp-1.67
%P 703-707
Markdown (Informal)
[LTRC-IIITH at MEDIQA-M3G 2024: Medical Visual Question Answering with Vision-Language Models](https://aclanthology.org/2024.clinicalnlp-1.67) (Thomas et al., ClinicalNLP-WS 2024)
ACL
Jerrin Thomas, Sushvin Marimuthu, and Parameswari Krishnamurthy. 2024. LTRC-IIITH at MEDIQA-M3G 2024: Medical Visual Question Answering with Vision-Language Models. In Proceedings of the 6th Clinical Natural Language Processing Workshop, pages 703–707, Mexico City, Mexico. Association for Computational Linguistics.