@inproceedings{nawezi-etal-2023-style,
title = "Style Locality for Controllable Generation with k{NN} Language Models",
author = "Nawezi, Gilles and
Flek, Lucie and
Welch, Charles",
editor = "Hazarika, Devamanyu and
Tang, Xiangru Robert and
Jin, Di",
booktitle = "Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!",
month = sep,
year = "2023",
address = "Prague, Czech Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.tllm-1.7",
pages = "68--75",
abstract = "Recent language models have been improved by the addition of external memory. Nearest neighbor language models retrieve similar contexts to assist in word prediction. The addition of locality levels allows a model to learn how to weight neighbors based on their relative location to the current text in source documents, and have been shown to further improve model performance. Nearest neighbor models have been explored for controllable generation but have not examined the use of locality levels. We present a novel approach for this purpose and evaluate it using automatic and human evaluation on politeness, formality, supportiveness, and toxicity textual data. We find that our model is successfully able to control style and provides a better fluency-style trade-off than previous work",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nawezi-etal-2023-style">
<titleInfo>
<title>Style Locality for Controllable Generation with kNN Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gilles</namePart>
<namePart type="family">Nawezi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucie</namePart>
<namePart type="family">Flek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Charles</namePart>
<namePart type="family">Welch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!</title>
</titleInfo>
<name type="personal">
<namePart type="given">Devamanyu</namePart>
<namePart type="family">Hazarika</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiangru</namePart>
<namePart type="given">Robert</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Di</namePart>
<namePart type="family">Jin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Prague, Czech Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent language models have been improved by the addition of external memory. Nearest neighbor language models retrieve similar contexts to assist in word prediction. The addition of locality levels allows a model to learn how to weight neighbors based on their relative location to the current text in source documents, and has been shown to further improve model performance. Nearest neighbor models have been explored for controllable generation but have not examined the use of locality levels. We present a novel approach for this purpose and evaluate it using automatic and human evaluation on politeness, formality, supportiveness, and toxicity textual data. We find that our model is successfully able to control style and provides a better fluency-style trade-off than previous work.</abstract>
<identifier type="citekey">nawezi-etal-2023-style</identifier>
<location>
<url>https://aclanthology.org/2023.tllm-1.7</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>68</start>
<end>75</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Style Locality for Controllable Generation with kNN Language Models
%A Nawezi, Gilles
%A Flek, Lucie
%A Welch, Charles
%Y Hazarika, Devamanyu
%Y Tang, Xiangru Robert
%Y Jin, Di
%S Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!
%D 2023
%8 September
%I Association for Computational Linguistics
%C Prague, Czech Republic
%F nawezi-etal-2023-style
%X Recent language models have been improved by the addition of external memory. Nearest neighbor language models retrieve similar contexts to assist in word prediction. The addition of locality levels allows a model to learn how to weight neighbors based on their relative location to the current text in source documents, and has been shown to further improve model performance. Nearest neighbor models have been explored for controllable generation but have not examined the use of locality levels. We present a novel approach for this purpose and evaluate it using automatic and human evaluation on politeness, formality, supportiveness, and toxicity textual data. We find that our model is successfully able to control style and provides a better fluency-style trade-off than previous work.
%U https://aclanthology.org/2023.tllm-1.7
%P 68-75
Markdown (Informal)
[Style Locality for Controllable Generation with kNN Language Models](https://aclanthology.org/2023.tllm-1.7) (Nawezi et al., TLLM-WS 2023)
ACL
Gilles Nawezi, Lucie Flek, and Charles Welch. 2023. [Style Locality for Controllable Generation with kNN Language Models](https://aclanthology.org/2023.tllm-1.7). In *Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!*, pages 68–75, Prague, Czech Republic. Association for Computational Linguistics.
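
For readers skimming the abstract, the following is a minimal, illustrative sketch of the kind of mechanism it describes: a base language model interpolated with a nearest-neighbor distribution whose retrieved neighbors are weighted by a learned per-locality-level factor. This is not the authors' implementation; all function names, the weighting scheme, and the hyperparameters below are assumptions made for illustration only.

```python
# Hypothetical sketch of a locality-weighted kNN language model step.
# Not the paper's code; names and weighting are illustrative assumptions.
import numpy as np

def knn_lm_next_token_probs(p_lm, neighbors, locality_weights,
                            lam=0.25, temperature=1.0):
    """Interpolate a base LM distribution with a locality-weighted kNN distribution.

    p_lm             : (V,) base language-model probabilities over the vocabulary
    neighbors        : list of (token_id, distance, locality_level) retrieved
                       from an external datastore
    locality_weights : (L,) weight per locality level, e.g. same paragraph,
                       same document, other documents (assumed learned elsewhere)
    lam              : interpolation weight between the kNN and base distributions
    """
    vocab_size = p_lm.shape[0]
    p_knn = np.zeros(vocab_size)
    for token_id, dist, level in neighbors:
        # Closer neighbors and more "local" levels contribute more probability mass.
        p_knn[token_id] += locality_weights[level] * np.exp(-dist / temperature)
    if p_knn.sum() > 0:
        p_knn /= p_knn.sum()
    return lam * p_knn + (1.0 - lam) * p_lm

# Toy usage: vocabulary of 5 tokens, three locality levels.
p_lm = np.array([0.1, 0.4, 0.2, 0.2, 0.1])
neighbors = [(1, 0.5, 0), (3, 1.2, 1), (1, 2.0, 2)]
locality_weights = np.array([1.0, 0.6, 0.3])
print(knn_lm_next_token_probs(p_lm, neighbors, locality_weights))
```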