@inproceedings{chen-etal-2020-glyph2vec,
title = "{G}lyph2{V}ec: Learning {C}hinese Out-of-Vocabulary Word Embedding from Glyphs",
author = "Chen, Hong-You and
Yu, Sz-Han and
Lin, Shou-de",
editor = "Jurafsky, Dan and
Chai, Joyce and
Schluter, Natalie and
Tetreault, Joel",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.256",
doi = "10.18653/v1/2020.acl-main.256",
pages = "2865--2871",
abstract = "Chinese NLP applications that rely on large text often contain huge amounts of vocabulary which are sparse in corpus. We show that characters{'} written form, \textit{Glyphs}, in ideographic languages could carry rich semantics. We present a multi-modal model, \textit{Glyph2Vec}, to tackle the Chinese out-of-vocabulary word embedding problem. Glyph2Vec extracts visual features from word glyphs to expand current word embedding space for out-of-vocabulary word embedding, without the need of accessing any corpus, which is useful for improving Chinese NLP systems, especially for low-resource scenarios. Experiments across different applications show the significant effectiveness of our model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chen-etal-2020-glyph2vec">
<titleInfo>
<title>Glyph2Vec: Learning Chinese Out-of-Vocabulary Word Embedding from Glyphs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hong-You</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sz-Han</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shou-de</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dan</namePart>
<namePart type="family">Jurafsky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Chai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Natalie</namePart>
<namePart type="family">Schluter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joel</namePart>
<namePart type="family">Tetreault</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Chinese NLP applications that rely on large text often contain huge amounts of vocabulary which are sparse in corpus. We show that characters’ written form, Glyphs, in ideographic languages could carry rich semantics. We present a multi-modal model, Glyph2Vec, to tackle the Chinese out-of-vocabulary word embedding problem. Glyph2Vec extracts visual features from word glyphs to expand current word embedding space for out-of-vocabulary word embedding, without the need of accessing any corpus, which is useful for improving Chinese NLP systems, especially for low-resource scenarios. Experiments across different applications show the significant effectiveness of our model.</abstract>
<identifier type="citekey">chen-etal-2020-glyph2vec</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.256</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.256</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>2865</start>
<end>2871</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Glyph2Vec: Learning Chinese Out-of-Vocabulary Word Embedding from Glyphs
%A Chen, Hong-You
%A Yu, Sz-Han
%A Lin, Shou-de
%Y Jurafsky, Dan
%Y Chai, Joyce
%Y Schluter, Natalie
%Y Tetreault, Joel
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F chen-etal-2020-glyph2vec
%X Chinese NLP applications that rely on large text often contain huge amounts of vocabulary which are sparse in corpus. We show that characters’ written form, Glyphs, in ideographic languages could carry rich semantics. We present a multi-modal model, Glyph2Vec, to tackle the Chinese out-of-vocabulary word embedding problem. Glyph2Vec extracts visual features from word glyphs to expand current word embedding space for out-of-vocabulary word embedding, without the need of accessing any corpus, which is useful for improving Chinese NLP systems, especially for low-resource scenarios. Experiments across different applications show the significant effectiveness of our model.
%R 10.18653/v1/2020.acl-main.256
%U https://aclanthology.org/2020.acl-main.256
%U https://doi.org/10.18653/v1/2020.acl-main.256
%P 2865-2871
Markdown (Informal)
[Glyph2Vec: Learning Chinese Out-of-Vocabulary Word Embedding from Glyphs](https://aclanthology.org/2020.acl-main.256) (Chen et al., ACL 2020)
ACL