@inproceedings{knappich-schrader-2023-controllable,
title = "Controllable Active-Passive Voice Generation using Prefix Tuning",
author = "Knappich, Valentin and
Schrader, Timo Pierre",
editor = "Hardalov, Momchil and
Kancheva, Zara and
Velichkov, Boris and
Nikolova-Koleva, Ivelina and
Slavcheva, Milena",
booktitle = "Proceedings of the 8th Student Research Workshop associated with the International Conference Recent Advances in Natural Language Processing",
month = sep,
year = "2023",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://aclanthology.org/2023.ranlp-stud.3",
pages = "23--32",
abstract = "The prompting paradigm is an uprising trend in the field of Natural Language Processing (NLP) that aims to learn tasks by finding appropriate prompts rather than fine-tuning the model weights. Such prompts can express an intention, e.g., they can instruct a language model to generate a summary of a given event. In this paper, we study how to influence ({``}control{''}) the language generation process such that the outcome fulfills a requested linguistic property. More specifically, we look at controllable active-passive (AP) voice generation, i.e., we require the model to generate a sentence in the requested voice. We build upon the prefix tuning approach and introduce control tokens that are trained on controllable AP generation. We create an AP subset of the WebNLG dataset to fine-tune these control tokens. Among four different models, the one trained with a contrastive learning approach yields the best results in terms of AP accuracy (95{\%}) but at the cost of decreased performance on the original WebNLG task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="knappich-schrader-2023-controllable">
<titleInfo>
<title>Controllable Active-Passive Voice Generation using Prefix Tuning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Valentin</namePart>
<namePart type="family">Knappich</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Timo</namePart>
<namePart type="given">Pierre</namePart>
<namePart type="family">Schrader</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 8th Student Research Workshop associated with the International Conference Recent Advances in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Momchil</namePart>
<namePart type="family">Hardalov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zara</namePart>
<namePart type="family">Kancheva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Boris</namePart>
<namePart type="family">Velichkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivelina</namePart>
<namePart type="family">Nikolova-Koleva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Milena</namePart>
<namePart type="family">Slavcheva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The prompting paradigm is an uprising trend in the field of Natural Language Processing (NLP) that aims to learn tasks by finding appropriate prompts rather than fine-tuning the model weights. Such prompts can express an intention, e.g., they can instruct a language model to generate a summary of a given event. In this paper, we study how to influence (“control”) the language generation process such that the outcome fulfills a requested linguistic property. More specifically, we look at controllable active-passive (AP) voice generation, i.e., we require the model to generate a sentence in the requested voice. We build upon the prefix tuning approach and introduce control tokens that are trained on controllable AP generation. We create an AP subset of the WebNLG dataset to fine-tune these control tokens. Among four different models, the one trained with a contrastive learning approach yields the best results in terms of AP accuracy (95%) but at the cost of decreased performance on the original WebNLG task.</abstract>
<identifier type="citekey">knappich-schrader-2023-controllable</identifier>
<location>
<url>https://aclanthology.org/2023.ranlp-stud.3</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>23</start>
<end>32</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Controllable Active-Passive Voice Generation using Prefix Tuning
%A Knappich, Valentin
%A Schrader, Timo Pierre
%Y Hardalov, Momchil
%Y Kancheva, Zara
%Y Velichkov, Boris
%Y Nikolova-Koleva, Ivelina
%Y Slavcheva, Milena
%S Proceedings of the 8th Student Research Workshop associated with the International Conference Recent Advances in Natural Language Processing
%D 2023
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F knappich-schrader-2023-controllable
%X The prompting paradigm is an uprising trend in the field of Natural Language Processing (NLP) that aims to learn tasks by finding appropriate prompts rather than fine-tuning the model weights. Such prompts can express an intention, e.g., they can instruct a language model to generate a summary of a given event. In this paper, we study how to influence (“control”) the language generation process such that the outcome fulfills a requested linguistic property. More specifically, we look at controllable active-passive (AP) voice generation, i.e., we require the model to generate a sentence in the requested voice. We build upon the prefix tuning approach and introduce control tokens that are trained on controllable AP generation. We create an AP subset of the WebNLG dataset to fine-tune these control tokens. Among four different models, the one trained with a contrastive learning approach yields the best results in terms of AP accuracy (95%) but at the cost of decreased performance on the original WebNLG task.
%U https://aclanthology.org/2023.ranlp-stud.3
%P 23-32
Markdown (Informal)
[Controllable Active-Passive Voice Generation using Prefix Tuning](https://aclanthology.org/2023.ranlp-stud.3) (Knappich & Schrader, RANLP 2023)
ACL
- Valentin Knappich and Timo Pierre Schrader. 2023. Controllable Active-Passive Voice Generation using Prefix Tuning. In Proceedings of the 8th Student Research Workshop associated with the International Conference Recent Advances in Natural Language Processing, pages 23–32, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.