@inproceedings{khan-khan-2024-sumotosima,
title = "Sumotosima : A Framework and Dataset for Classifying and Summarizing Otoscopic Images",
author = "Khan, Eram Anwarul and
Khan, Anas Anwarul Haq",
editor = "Lalitha Devi, Sobha and
Arora, Karunesh",
booktitle = "Proceedings of the 21st International Conference on Natural Language Processing (ICON)",
month = dec,
year = "2024",
address = "AU-KBC Research Centre, Chennai, India",
publisher = "NLP Association of India (NLPAI)",
url = "https://aclanthology.org/2024.icon-1.1/",
pages = "1--11",
abstract = "Otoscopy is a diagnostic procedure to examine the ear canal and eardrum using an otoscope. It identifies conditions like infections, foreign bodies, eardrum perforations, and ear abnormalities. We propose a novel resource-efficient deep learning and transformer-based framework, Sumotosima (Summarizer for Otoscopic Images), which provides an end-to-end pipeline for classification followed by summarization. Our framework utilizes a combination of triplet and cross-entropy losses. Additionally, we use Knowledge Enhanced Multimodal BART, where the input is fused textual and image embeddings. The objective is to deliver summaries that are well-suited for patients, ensuring clarity and efficiency in understanding otoscopic images. Given the lack of existing datasets, we have curated our own OCASD (Otoscopy Classification And Summary Dataset), which includes 500 images with 5 unique categories, annotated with their class and summaries by otolaryngologists. Sumotosima achieved a result of 98.03{\%}, which is 7.00{\%}, 3.10{\%}, and 3.01{\%} higher than K-Nearest Neighbors, Random Forest, and Support Vector Machines, respectively, in classification tasks. For summarization, Sumotosima outperformed GPT-4o and LLaVA by 88.53{\%} and 107.57{\%} in ROUGE scores, respectively. We have made our code and dataset publicly available at https://github.com/anas2908/Sumotosima"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="khan-khan-2024-sumotosima">
<titleInfo>
<title>Sumotosima : A Framework and Dataset for Classifying and Summarizing Otoscopic Images</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eram</namePart>
<namePart type="given">Anwarul</namePart>
<namePart type="family">Khan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anas</namePart>
<namePart type="given">Anwarul</namePart>
<namePart type="given">Haq</namePart>
<namePart type="family">Khan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 21st International Conference on Natural Language Processing (ICON)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sobha</namePart>
<namePart type="family">Lalitha Devi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karunesh</namePart>
<namePart type="family">Arora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>NLP Association of India (NLPAI)</publisher>
<place>
<placeTerm type="text">AU-KBC Research Centre, Chennai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Otoscopy is a diagnostic procedure to examine the ear canal and eardrum using an otoscope. It identifies conditions like infections, foreign bodies, eardrum perforations, and ear abnormalities. We propose a novel resource-efficient deep learning and transformer-based framework, Sumotosima (Summarizer for Otoscopic Images), which provides an end-to-end pipeline for classification followed by summarization. Our framework utilizes a combination of triplet and cross-entropy losses. Additionally, we use Knowledge Enhanced Multimodal BART, where the input is fused textual and image embeddings. The objective is to deliver summaries that are well-suited for patients, ensuring clarity and efficiency in understanding otoscopic images. Given the lack of existing datasets, we have curated our own OCASD (Otoscopy Classification And Summary Dataset), which includes 500 images with 5 unique categories, annotated with their class and summaries by otolaryngologists. Sumotosima achieved a result of 98.03%, which is 7.00%, 3.10%, and 3.01% higher than K-Nearest Neighbors, Random Forest, and Support Vector Machines, respectively, in classification tasks. For summarization, Sumotosima outperformed GPT-4o and LLaVA by 88.53% and 107.57% in ROUGE scores, respectively. We have made our code and dataset publicly available at https://github.com/anas2908/Sumotosima</abstract>
<identifier type="citekey">khan-khan-2024-sumotosima</identifier>
<location>
<url>https://aclanthology.org/2024.icon-1.1/</url>
</location>
<part>
<date>2024-12</date>
<extent unit="page">
<start>1</start>
<end>11</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sumotosima : A Framework and Dataset for Classifying and Summarizing Otoscopic Images
%A Khan, Eram Anwarul
%A Khan, Anas Anwarul Haq
%Y Lalitha Devi, Sobha
%Y Arora, Karunesh
%S Proceedings of the 21st International Conference on Natural Language Processing (ICON)
%D 2024
%8 December
%I NLP Association of India (NLPAI)
%C AU-KBC Research Centre, Chennai, India
%F khan-khan-2024-sumotosima
%X Otoscopy is a diagnostic procedure to examine the ear canal and eardrum using an otoscope. It identifies conditions like infections, foreign bodies, eardrum perforations, and ear abnormalities. We propose a novel resource-efficient deep learning and transformer-based framework, Sumotosima (Summarizer for Otoscopic Images), which provides an end-to-end pipeline for classification followed by summarization. Our framework utilizes a combination of triplet and cross-entropy losses. Additionally, we use Knowledge Enhanced Multimodal BART, where the input is fused textual and image embeddings. The objective is to deliver summaries that are well-suited for patients, ensuring clarity and efficiency in understanding otoscopic images. Given the lack of existing datasets, we have curated our own OCASD (Otoscopy Classification And Summary Dataset), which includes 500 images with 5 unique categories, annotated with their class and summaries by otolaryngologists. Sumotosima achieved a result of 98.03%, which is 7.00%, 3.10%, and 3.01% higher than K-Nearest Neighbors, Random Forest, and Support Vector Machines, respectively, in classification tasks. For summarization, Sumotosima outperformed GPT-4o and LLaVA by 88.53% and 107.57% in ROUGE scores, respectively. We have made our code and dataset publicly available at https://github.com/anas2908/Sumotosima
%U https://aclanthology.org/2024.icon-1.1/
%P 1-11
Markdown (Informal)
[Sumotosima : A Framework and Dataset for Classifying and Summarizing Otoscopic Images](https://aclanthology.org/2024.icon-1.1/) (Khan & Khan, ICON 2024)
ACL
Eram Anwarul Khan and Anas Anwarul Haq Khan. 2024. Sumotosima : A Framework and Dataset for Classifying and Summarizing Otoscopic Images. In Proceedings of the 21st International Conference on Natural Language Processing (ICON), pages 1–11, AU-KBC Research Centre, Chennai, India. NLP Association of India (NLPAI).
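
The abstract describes training the classifier with a combination of triplet and cross-entropy losses. The sketch below is a minimal, hypothetical PyTorch illustration of that combination (the module names, dimensions, and weighting factor are assumptions, not the authors' released code from the linked repository):

```python
import torch
import torch.nn as nn

# Hypothetical head that yields an embedding (for the triplet loss) and
# class logits (for the cross-entropy loss), as the abstract describes
# combining the two objectives. Sizes and names are illustrative only.
class OtoscopicClassifier(nn.Module):
    def __init__(self, backbone_dim=512, embed_dim=128, num_classes=5):
        super().__init__()
        self.embed = nn.Linear(backbone_dim, embed_dim)
        self.classify = nn.Linear(embed_dim, num_classes)

    def forward(self, features):
        z = self.embed(features)      # embedding used by the triplet loss
        logits = self.classify(z)     # logits used by the cross-entropy loss
        return z, logits

triplet_loss = nn.TripletMarginLoss(margin=1.0)
ce_loss = nn.CrossEntropyLoss()

def combined_loss(model, anchor, positive, negative, labels, alpha=0.5):
    za, logits = model(anchor)
    zp, _ = model(positive)
    zn, _ = model(negative)
    # Weighted sum of the two objectives; alpha is an assumed hyperparameter.
    return alpha * triplet_loss(za, zp, zn) + (1 - alpha) * ce_loss(logits, labels)
```

The abstract also mentions a Knowledge Enhanced Multimodal BART whose input is fused textual and image embeddings. A rough sketch of one way such fusion could look with Hugging Face `transformers` is given below; the projection layer, the 512-dimensional visual features, and the single prepended image token are assumptions, not the paper's actual architecture:

```python
import torch
from transformers import BartTokenizer, BartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
bart = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
project = torch.nn.Linear(512, bart.config.d_model)  # assumed 512-d visual features

def summarization_loss(image_features, knowledge_text, target_summary):
    # Embed the knowledge text, project the image features, and prepend them
    # so the encoder sees a fused multimodal sequence (teacher-forced training).
    tokens = tokenizer(knowledge_text, return_tensors="pt")
    text_embeds = bart.get_input_embeddings()(tokens.input_ids)
    visual_embeds = project(image_features).unsqueeze(1)      # (batch, 1, d_model)
    fused = torch.cat([visual_embeds, text_embeds], dim=1)
    labels = tokenizer(target_summary, return_tensors="pt").input_ids
    return bart(inputs_embeds=fused, labels=labels).loss
```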