@inproceedings{strubell-mccallum-2017-dependency,
    title = "Dependency Parsing with Dilated Iterated Graph {CNN}s",
    author = "Strubell, Emma and
      McCallum, Andrew",
    editor = "Chang, Kai-Wei and
      Chang, Ming-Wei and
      Srikumar, Vivek and
      Rush, Alexander M.",
    booktitle = "Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4301",
    doi = "10.18653/v1/W17-4301",
    pages = "1--6",
abstract = "Dependency parses are an effective way to inject linguistic knowledge into many downstream tasks, and many practitioners wish to efficiently parse sentences at scale. Recent advances in GPU hardware have enabled neural networks to achieve significant gains over the previous best models, these models still fail to leverage GPUs{'} capability for massive parallelism due to their requirement of sequential processing of the sentence. In response, we propose Dilated Iterated Graph Convolutional Neural Networks (DIG-CNNs) for graph-based dependency parsing, a graph convolutional architecture that allows for efficient end-to-end GPU parsing. In experiments on the English Penn TreeBank benchmark, we show that DIG-CNNs perform on par with some of the best neural network parsers.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="strubell-mccallum-2017-dependency">
<titleInfo>
<title>Dependency Parsing with Dilated Iterated Graph CNNs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Emma</namePart>
<namePart type="family">Strubell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">McCallum</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kai-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ming-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Rush</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Copenhagen, Denmark</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Dependency parses are an effective way to inject linguistic knowledge into many downstream tasks, and many practitioners wish to efficiently parse sentences at scale. While recent advances in GPU hardware have enabled neural networks to achieve significant gains over the previous best models, these models still fail to leverage GPUs’ capability for massive parallelism due to their requirement of sequential processing of the sentence. In response, we propose Dilated Iterated Graph Convolutional Neural Networks (DIG-CNNs) for graph-based dependency parsing, a graph convolutional architecture that allows for efficient end-to-end GPU parsing. In experiments on the English Penn TreeBank benchmark, we show that DIG-CNNs perform on par with some of the best neural network parsers.</abstract>
<identifier type="citekey">strubell-mccallum-2017-dependency</identifier>
<identifier type="doi">10.18653/v1/W17-4301</identifier>
<location>
<url>https://aclanthology.org/W17-4301</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>1</start>
<end>6</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dependency Parsing with Dilated Iterated Graph CNNs
%A Strubell, Emma
%A McCallum, Andrew
%Y Chang, Kai-Wei
%Y Chang, Ming-Wei
%Y Srikumar, Vivek
%Y Rush, Alexander M.
%S Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing
%D 2017
%8 September
%I Association for Computational Linguistics
%C Copenhagen, Denmark
%F strubell-mccallum-2017-dependency
%X Dependency parses are an effective way to inject linguistic knowledge into many downstream tasks, and many practitioners wish to efficiently parse sentences at scale. While recent advances in GPU hardware have enabled neural networks to achieve significant gains over the previous best models, these models still fail to leverage GPUs’ capability for massive parallelism due to their requirement of sequential processing of the sentence. In response, we propose Dilated Iterated Graph Convolutional Neural Networks (DIG-CNNs) for graph-based dependency parsing, a graph convolutional architecture that allows for efficient end-to-end GPU parsing. In experiments on the English Penn TreeBank benchmark, we show that DIG-CNNs perform on par with some of the best neural network parsers.
%R 10.18653/v1/W17-4301
%U https://aclanthology.org/W17-4301
%U https://doi.org/10.18653/v1/W17-4301
%P 1-6
Markdown (Informal)
[Dependency Parsing with Dilated Iterated Graph CNNs](https://aclanthology.org/W17-4301) (Strubell & McCallum, 2017)

ACL
Emma Strubell and Andrew McCallum. 2017. Dependency Parsing with Dilated Iterated Graph CNNs. In Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing, pages 1–6, Copenhagen, Denmark. Association for Computational Linguistics.
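As a rough illustration of the idea the abstract describes, and not the authors' implementation, the sketch below encodes a sentence with a stack of dilated 1-D convolutions so that every token gathers full-sentence context without any sequential (RNN-style) pass, then scores all head-dependent arcs in a single batched matrix product. The layer sizes, dilation schedule, residual connections, and the generic bilinear arc scorer are illustrative assumptions.

```python
# Minimal PyTorch-style sketch of a dilated convolutional sentence encoder
# plus all-pairs arc scoring. Hyperparameters are assumptions, not the
# paper's exact configuration.
import torch
import torch.nn as nn


class DilatedSentenceEncoder(nn.Module):
    """Stack of 1-D convolutions with growing dilation, so the receptive
    field covers the whole sentence after a few parallel layers."""

    def __init__(self, dim=128, dilations=(1, 2, 4, 8)):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.Conv1d(dim, dim, kernel_size=3, dilation=d, padding=d)
            for d in dilations
        )
        self.act = nn.ReLU()

    def forward(self, x):              # x: (batch, seq_len, dim)
        h = x.transpose(1, 2)          # Conv1d expects (batch, dim, seq_len)
        for conv in self.layers:
            h = self.act(conv(h)) + h  # residual connection between blocks
        return h.transpose(1, 2)       # back to (batch, seq_len, dim)


class ArcScorer(nn.Module):
    """Scores every head-dependent pair at once with a batched matmul,
    which is what keeps the whole pipeline GPU-parallel."""

    def __init__(self, dim=128):
        super().__init__()
        self.bilinear = nn.Parameter(torch.randn(dim, dim) * 0.01)

    def forward(self, h):              # h: (batch, seq_len, dim)
        return h @ self.bilinear @ h.transpose(1, 2)  # (batch, len, len)


if __name__ == "__main__":
    tokens = torch.randn(2, 10, 128)   # a batch of 2 ten-token "sentences"
    scores = ArcScorer()(DilatedSentenceEncoder()(tokens))
    print(scores.shape)                # torch.Size([2, 10, 10])
```

Decoding a well-formed dependency tree from the score matrix (for example with a maximum-spanning-tree algorithm) is omitted here; the sketch only shows why the scoring itself needs no sequential processing of the sentence.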