@inproceedings{yu-etal-2018-approximate,
    title = "Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning",
    author = "Yu, Xiang and
      Vu, Ngoc Thang and
      Kuhn, Jonas",
    editor = "de Marneffe, Marie-Catherine and
      Lynn, Teresa and
      Schuster, Sebastian",
    booktitle = "Proceedings of the Second Workshop on Universal Dependencies ({UDW} 2018)",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-6021",
    doi = "10.18653/v1/W18-6021",
    pages = "183--191",
    abstract = "We present a general approach with reinforcement learning (RL) to approximate dynamic oracles for transition systems where exact dynamic oracles are difficult to derive. We treat oracle parsing as a reinforcement learning problem, design the reward function inspired by the classical dynamic oracle, and use Deep Q-Learning (DQN) techniques to train the oracle with gold trees as features. The combination of a priori knowledge and data-driven methods enables an efficient dynamic oracle, which improves the parser performance over static oracles in several transition systems.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yu-etal-2018-approximate">
    <titleInfo>
      <title>Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xiang</namePart>
      <namePart type="family">Yu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ngoc</namePart>
      <namePart type="given">Thang</namePart>
      <namePart type="family">Vu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jonas</namePart>
      <namePart type="family">Kuhn</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Second Workshop on Universal Dependencies (UDW 2018)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Marie-Catherine</namePart>
        <namePart type="family">de Marneffe</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Teresa</namePart>
        <namePart type="family">Lynn</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sebastian</namePart>
        <namePart type="family">Schuster</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We present a general approach with reinforcement learning (RL) to approximate dynamic oracles for transition systems where exact dynamic oracles are difficult to derive. We treat oracle parsing as a reinforcement learning problem, design the reward function inspired by the classical dynamic oracle, and use Deep Q-Learning (DQN) techniques to train the oracle with gold trees as features. The combination of a priori knowledge and data-driven methods enables an efficient dynamic oracle, which improves the parser performance over static oracles in several transition systems.</abstract>
    <identifier type="citekey">yu-etal-2018-approximate</identifier>
    <identifier type="doi">10.18653/v1/W18-6021</identifier>
    <location>
      <url>https://aclanthology.org/W18-6021</url>
    </location>
    <part>
      <date>2018-11</date>
      <extent unit="page">
        <start>183</start>
        <end>191</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning
%A Yu, Xiang
%A Vu, Ngoc Thang
%A Kuhn, Jonas
%Y de Marneffe, Marie-Catherine
%Y Lynn, Teresa
%Y Schuster, Sebastian
%S Proceedings of the Second Workshop on Universal Dependencies (UDW 2018)
%D 2018
%8 November
%I Association for Computational Linguistics
%C Brussels, Belgium
%F yu-etal-2018-approximate
%X We present a general approach with reinforcement learning (RL) to approximate dynamic oracles for transition systems where exact dynamic oracles are difficult to derive. We treat oracle parsing as a reinforcement learning problem, design the reward function inspired by the classical dynamic oracle, and use Deep Q-Learning (DQN) techniques to train the oracle with gold trees as features. The combination of a priori knowledge and data-driven methods enables an efficient dynamic oracle, which improves the parser performance over static oracles in several transition systems.
%R 10.18653/v1/W18-6021
%U https://aclanthology.org/W18-6021
%U https://doi.org/10.18653/v1/W18-6021
%P 183-191
Markdown (Informal)
[Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning](https://aclanthology.org/W18-6021) (Yu et al., UDW 2018)

ACL
Xiang Yu, Ngoc Thang Vu, and Jonas Kuhn. 2018. Approximate Dynamic Oracle for Dependency Parsing with Reinforcement Learning. In Proceedings of the Second Workshop on Universal Dependencies (UDW 2018), pages 183–191, Brussels, Belgium. Association for Computational Linguistics.