@inproceedings{ghosh-etal-2023-text,
title = "Text-Derived Knowledge Helps Vision: A Simple Cross-modal Distillation for Video-based Action Anticipation",
author = "Ghosh, Sayontan and
Aggarwal, Tanvi and
Hoai, Minh and
Balasubramanian, Niranjan",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.141",
doi = "10.18653/v1/2023.findings-eacl.141",
pages = "1882--1897",
abstract = "Anticipating future actions in a video is useful for many autonomous and assistive technologies. Prior action anticipation work mostly treat this as a vision modality problem, where the models learn the task information primarily from the video features in the action anticipation datasets. However, knowledge about action sequences can also be obtained from external textual data. In this work, we show how knowledge in pretrained language models can be adapted and distilled into vision based action anticipation models. We show that a simple distillation technique can achieve effective knowledge transfer and provide consistent gains on a strong vision model (Anticipative Vision Transformer) for two action anticipation datasets (3.5{\%} relative gain on EGTEA-GAZE+ and 7.2{\%} relative gain on EPIC-KITCHEN 55), giving a new state-of-the-art result.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ghosh-etal-2023-text">
<titleInfo>
<title>Text-Derived Knowledge Helps Vision: A Simple Cross-modal Distillation for Video-based Action Anticipation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sayontan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanvi</namePart>
<namePart type="family">Aggarwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Minh</namePart>
<namePart type="family">Hoai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Niranjan</namePart>
<namePart type="family">Balasubramanian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EACL 2023</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Anticipating future actions in a video is useful for many autonomous and assistive technologies. Prior action anticipation work mostly treat this as a vision modality problem, where the models learn the task information primarily from the video features in the action anticipation datasets. However, knowledge about action sequences can also be obtained from external textual data. In this work, we show how knowledge in pretrained language models can be adapted and distilled into vision based action anticipation models. We show that a simple distillation technique can achieve effective knowledge transfer and provide consistent gains on a strong vision model (Anticipative Vision Transformer) for two action anticipation datasets (3.5% relative gain on EGTEA-GAZE+ and 7.2% relative gain on EPIC-KITCHEN 55), giving a new state-of-the-art result.</abstract>
<identifier type="citekey">ghosh-etal-2023-text</identifier>
<identifier type="doi">10.18653/v1/2023.findings-eacl.141</identifier>
<location>
<url>https://aclanthology.org/2023.findings-eacl.141</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>1882</start>
<end>1897</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Text-Derived Knowledge Helps Vision: A Simple Cross-modal Distillation for Video-based Action Anticipation
%A Ghosh, Sayontan
%A Aggarwal, Tanvi
%A Hoai, Minh
%A Balasubramanian, Niranjan
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F ghosh-etal-2023-text
%X Anticipating future actions in a video is useful for many autonomous and assistive technologies. Prior action anticipation work mostly treat this as a vision modality problem, where the models learn the task information primarily from the video features in the action anticipation datasets. However, knowledge about action sequences can also be obtained from external textual data. In this work, we show how knowledge in pretrained language models can be adapted and distilled into vision based action anticipation models. We show that a simple distillation technique can achieve effective knowledge transfer and provide consistent gains on a strong vision model (Anticipative Vision Transformer) for two action anticipation datasets (3.5% relative gain on EGTEA-GAZE+ and 7.2% relative gain on EPIC-KITCHEN 55), giving a new state-of-the-art result.
%R 10.18653/v1/2023.findings-eacl.141
%U https://aclanthology.org/2023.findings-eacl.141
%U https://doi.org/10.18653/v1/2023.findings-eacl.141
%P 1882-1897