@article{artetxe-schwenk-2019-massively,
    title = "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond",
    author = "Artetxe, Mikel  and
      Schwenk, Holger",
    editor = "Lee, Lillian  and
      Johnson, Mark  and
      Roark, Brian  and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "7",
    year = "2019",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/Q19-1038/",
    doi = "10.1162/tacl_a_00288",
    pages = "597--610",
    abstract = "We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification. Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low-resource languages. Our implementation, the pre-trained encoder, and the multilingual test set are available at \url{https://github.com/facebookresearch/LASER}."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="artetxe-schwenk-2019-massively">
    <titleInfo>
        <title>Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Mikel</namePart>
        <namePart type="family">Artetxe</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Holger</namePart>
        <namePart type="family">Schwenk</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2019</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <genre authority="bibutilsgt">journal article</genre>
    <relatedItem type="host">
        <titleInfo>
            <title>Transactions of the Association for Computational Linguistics</title>
        </titleInfo>
        <originInfo>
            <issuance>continuing</issuance>
            <publisher>MIT Press</publisher>
            <place>
                <placeTerm type="text">Cambridge, MA</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">periodical</genre>
        <genre authority="bibutilsgt">academic journal</genre>
    </relatedItem>
    <abstract>We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification. Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low-resource languages. Our implementation, the pre-trained encoder, and the multilingual test set are available at https://github.com/facebookresearch/LASER.</abstract>
    <identifier type="citekey">artetxe-schwenk-2019-massively</identifier>
    <identifier type="doi">10.1162/tacl_a_00288</identifier>
    <location>
        <url>https://aclanthology.org/Q19-1038/</url>
    </location>
    <part>
        <date>2019</date>
        <detail type="volume"><number>7</number></detail>
        <extent unit="page">
            <start>597</start>
            <end>610</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Journal Article
%T Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond
%A Artetxe, Mikel
%A Schwenk, Holger
%J Transactions of the Association for Computational Linguistics
%D 2019
%V 7
%I MIT Press
%C Cambridge, MA
%F artetxe-schwenk-2019-massively
%X We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification. Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low-resource languages. Our implementation, the pre-trained encoder, and the multilingual test set are available at https://github.com/facebookresearch/LASER.
%R 10.1162/tacl_a_00288
%U https://aclanthology.org/Q19-1038/
%U https://doi.org/10.1162/tacl_a_00288
%P 597-610
Markdown (Informal)
[Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond](https://aclanthology.org/Q19-1038/) (Artetxe & Schwenk, TACL 2019)
ACL