@inproceedings{hartmann-EtAl:2017:LAW,
  author    = {Hartmann, Silvana and M{\'u}jdricza-Maydt, {\'E}va and Kuznetsov, Ilia and Gurevych, Iryna and Frank, Anette},
  title     = {Assessing {SRL} Frameworks with Automatic Training Data Expansion},
  booktitle = {Proceedings of the 11th Linguistic Annotation Workshop},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {115--121},
  abstract  = {We present the first experiment-based study that explicitly contrasts
               the three major semantic role labeling frameworks. As a prerequisite,
               we create a dataset labeled with parallel FrameNet-, PropBank-, and
               VerbNet-style labels for German. We train a state-of-the-art SRL tool
               for German for the different annotation styles and provide a
               comparative analysis across frameworks. We further explore the
               behavior of the frameworks with automatic training data generation.
               VerbNet provides larger semantic expressivity than PropBank, and we
               find that its generalization capacity approaches PropBank in SRL
               training, but it benefits less from training data expansion than the
               sparse-data affected FrameNet.},
  url       = {https://www.aclweb.org/anthology/W17-0814},
}

