BibTeX
@inproceedings{wingate-etal-2022-prompt,
    title = "Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models",
    author = "Wingate, David  and
      Shoeybi, Mohammad  and
      Sorensen, Taylor",
    editor = "Goldberg, Yoav  and
      Kozareva, Zornitsa  and
      Zhang, Yue",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.412",
    doi = "10.18653/v1/2022.findings-emnlp.412",
    pages = "5621--5634",
    abstract = "We explore the idea of compressing the prompts used to condition language models, and show that compressed prompts can retain a substantive amount of information about the original prompt. For severely compressed prompts, while fine-grained information is lost, abstract information and general sentiments can be retained with surprisingly few parameters, which can be useful in the context of decode-time algorithms for controllability and toxicity reduction. We find that some complex prompts can be effectively compressed into a single token to guide generation. We also show that compressed prompts are largely compositional, and can be constructed such that they can be used to control independent aspects of generated text.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wingate-etal-2022-prompt">
    <titleInfo>
        <title>Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Wingate</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Mohammad</namePart>
        <namePart type="family">Shoeybi</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Taylor</namePart>
        <namePart type="family">Sorensen</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Yoav</namePart>
            <namePart type="family">Goldberg</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Zornitsa</namePart>
            <namePart type="family">Kozareva</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yue</namePart>
            <namePart type="family">Zhang</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We explore the idea of compressing the prompts used to condition language models, and show that compressed prompts can retain a substantive amount of information about the original prompt. For severely compressed prompts, while fine-grained information is lost, abstract information and general sentiments can be retained with surprisingly few parameters, which can be useful in the context of decode-time algorithms for controllability and toxicity reduction. We find that some complex prompts can be effectively compressed into a single token to guide generation. We also show that compressed prompts are largely compositional, and can be constructed such that they can be used to control independent aspects of generated text.</abstract>
    <identifier type="citekey">wingate-etal-2022-prompt</identifier>
    <identifier type="doi">10.18653/v1/2022.findings-emnlp.412</identifier>
    <location>
        <url>https://aclanthology.org/2022.findings-emnlp.412</url>
    </location>
    <part>
        <date>2022-12</date>
        <extent unit="page">
            <start>5621</start>
            <end>5634</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models
%A Wingate, David
%A Shoeybi, Mohammad
%A Sorensen, Taylor
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F wingate-etal-2022-prompt
%X We explore the idea of compressing the prompts used to condition language models, and show that compressed prompts can retain a substantive amount of information about the original prompt. For severely compressed prompts, while fine-grained information is lost, abstract information and general sentiments can be retained with surprisingly few parameters, which can be useful in the context of decode-time algorithms for controllability and toxicity reduction. We find that some complex prompts can be effectively compressed into a single token to guide generation. We also show that compressed prompts are largely compositional, and can be constructed such that they can be used to control independent aspects of generated text.
%R 10.18653/v1/2022.findings-emnlp.412
%U https://aclanthology.org/2022.findings-emnlp.412
%U https://doi.org/10.18653/v1/2022.findings-emnlp.412
%P 5621-5634
Markdown (Informal)
[Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models](https://aclanthology.org/2022.findings-emnlp.412) (Wingate et al., Findings 2022)
ACL
- David Wingate, Mohammad Shoeybi, and Taylor Sorensen. 2022. [Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models](https://aclanthology.org/2022.findings-emnlp.412). In *Findings of the Association for Computational Linguistics: EMNLP 2022*, pages 5621–5634, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
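The abstract's central claim is that a long conditioning prompt can be distilled into a handful of trainable embedding vectors, sometimes just one. The sketch below is a minimal, hypothetical illustration of that idea, not the authors' released code: it trains a single soft token to match a frozen model's next-token distributions under the original hard prompt via a KL objective. The model choice (gpt2), the hyperparameters, and the training loop are assumptions for illustration.

```python
# Hypothetical sketch of prompt compression as described in the abstract:
# distill a long "hard" prompt into k trainable soft-embedding vectors by
# matching the frozen LM's next-token distributions. Model, k, lr, and the
# number of steps are illustrative assumptions, not the paper's settings.
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, GPT2Tokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tok = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2").to(device).eval()
for p in model.parameters():
    p.requires_grad_(False)  # the LM stays frozen; only the soft prompt trains

hard_prompt = "A long description of the topic, style, and sentiment to condition on ..."
hard_ids = tok(hard_prompt, return_tensors="pt").input_ids.to(device)
hard_emb = model.transformer.wte(hard_ids)  # (1, m, d) hard-prompt embeddings

k = 1  # compress to a single soft token
soft = torch.nn.Parameter(hard_emb[:, :k, :].detach().clone())
opt = torch.optim.Adam([soft], lr=1e-3)

for step in range(200):
    with torch.no_grad():
        # Sample a continuation under the hard prompt, then record the
        # teacher's distributions over those continuation tokens.
        cont = model.generate(hard_ids, do_sample=True, max_new_tokens=32,
                              pad_token_id=tok.eos_token_id)[:, hard_ids.size(1):]
        cont_emb = model.transformer.wte(cont)
        t_logits = model(inputs_embeds=torch.cat([hard_emb, cont_emb], dim=1)).logits
        t_logp = F.log_softmax(t_logits[:, hard_emb.size(1) - 1:-1], dim=-1)

    # Student: the same continuation, but conditioned on the soft prompt instead.
    s_logits = model(inputs_embeds=torch.cat([soft, cont_emb], dim=1)).logits
    s_logp = F.log_softmax(s_logits[:, k - 1:-1], dim=-1)

    # Minimize KL(teacher || student) over the continuation positions.
    loss = F.kl_div(s_logp, t_logp, log_target=True, reduction="batchmean")
    opt.zero_grad()
    loss.backward()
    opt.step()

# After training, torch.cat([soft, input_embeddings], dim=1) conditions
# generation roughly the way the full hard prompt would.
```

Contrastive conditioning, the paper's second ingredient, then uses such compressed prompts at decode time, comparing the model's token probabilities under different conditioning to steer generation away from, for example, toxic continuations; that decode-time reweighting is not shown in this sketch.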