BibTeX
@inproceedings{gu-etal-2025-llm,
    title = "An {LLM}-based Framework for Domain-Specific Information Extraction: A Case Study in Computer Science and Chemistry",
    author = "Gu, Xungang and
      Tian, Yangjie and
      Li, Ning and
      Liu, Meng and
      Xu, Ruohua and
      Zhang, He and
      Liu, Hanqiu and
      Sheng, Yongpan and
      Liu, Ming",
    editor = "Kummerfeld, Jonathan K. and
      Joshi, Aditya and
      Dras, Mark",
    booktitle = "Proceedings of the 23rd Annual Workshop of the Australasian Language Technology Association",
    month = nov,
    year = "2025",
    address = "Sydney, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.alta-main.8/",
    pages = "101--111",
    ISSN = "1834-7037",
    abstract = "Information extraction (IE) in specialized domains like computer science and chemistry is challenged by the poor generalization of traditional models and the knowledge deficits of general-purpose Large Language Models (LLMs). We introduce a robust, LLM-based framework featuring two core contributions: an end-to-end training and inference paradigm that combines continual pre-training (CPT) for knowledge injection, supervised fine-tuning (SFT) for task alignment, and retrieval-augmented generation (RAG) for inference-time enhancement; and a novel LLM-assisted data annotation pipeline for the efficient creation of high-quality training data. Comprehensive experiments demonstrate that while fine-tuning alone yields strong in-domain performance, our complete framework exhibits superior robustness and generalization. It consistently achieves state-of-the-art results in challenging domain-shift and novel-schema scenarios, validating our integrated approach for building adaptable and high-performance domain-specific IE systems."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="gu-etal-2025-llm">
    <titleInfo>
      <title>An LLM-based Framework for Domain-Specific Information Extraction: A Case Study in Computer Science and Chemistry</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xungang</namePart>
      <namePart type="family">Gu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yangjie</namePart>
      <namePart type="family">Tian</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ning</namePart>
      <namePart type="family">Li</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Meng</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ruohua</namePart>
      <namePart type="family">Xu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">He</namePart>
      <namePart type="family">Zhang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hanqiu</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yongpan</namePart>
      <namePart type="family">Sheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ming</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 23rd Annual Workshop of the Australasian Language Technology Association</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jonathan</namePart>
        <namePart type="given">K</namePart>
        <namePart type="family">Kummerfeld</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Aditya</namePart>
        <namePart type="family">Joshi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mark</namePart>
        <namePart type="family">Dras</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Sydney, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="issn">1834-7037</identifier>
    </relatedItem>
    <abstract>Information extraction (IE) in specialized domains like computer science and chemistry is challenged by the poor generalization of traditional models and the knowledge deficits of general-purpose Large Language Models (LLMs). We introduce a robust, LLM-based framework featuring two core contributions: an end-to-end training and inference paradigm that combines continual pre-training (CPT) for knowledge injection, supervised fine-tuning (SFT) for task alignment, and retrieval-augmented generation (RAG) for inference-time enhancement; and a novel LLM-assisted data annotation pipeline for the efficient creation of high-quality training data. Comprehensive experiments demonstrate that while fine-tuning alone yields strong in-domain performance, our complete framework exhibits superior robustness and generalization. It consistently achieves state-of-the-art results in challenging domain-shift and novel-schema scenarios, validating our integrated approach for building adaptable and high-performance domain-specific IE systems.</abstract>
    <identifier type="citekey">gu-etal-2025-llm</identifier>
    <location>
      <url>https://aclanthology.org/2025.alta-main.8/</url>
    </location>
    <part>
      <date>2025-11</date>
      <extent unit="page">
        <start>101</start>
        <end>111</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T An LLM-based Framework for Domain-Specific Information Extraction: A Case Study in Computer Science and Chemistry
%A Gu, Xungang
%A Tian, Yangjie
%A Li, Ning
%A Liu, Meng
%A Xu, Ruohua
%A Zhang, He
%A Liu, Hanqiu
%A Sheng, Yongpan
%A Liu, Ming
%Y Kummerfeld, Jonathan K.
%Y Joshi, Aditya
%Y Dras, Mark
%S Proceedings of the 23rd Annual Workshop of the Australasian Language Technology Association
%D 2025
%8 November
%I Association for Computational Linguistics
%C Sydney, Australia
%@ 1834-7037
%F gu-etal-2025-llm
%X Information extraction (IE) in specialized domains like computer science and chemistry is challenged by the poor generalization of traditional models and the knowledge deficits of general-purpose Large Language Models (LLMs). We introduce a robust, LLM-based framework featuring two core contributions: an end-to-end training and inference paradigm that combines continual pre-training (CPT) for knowledge injection, supervised fine-tuning (SFT) for task alignment, and retrieval-augmented generation (RAG) for inference-time enhancement; and a novel LLM-assisted data annotation pipeline for the efficient creation of high-quality training data. Comprehensive experiments demonstrate that while fine-tuning alone yields strong in-domain performance, our complete framework exhibits superior robustness and generalization. It consistently achieves state-of-the-art results in challenging domain-shift and novel-schema scenarios, validating our integrated approach for building adaptable and high-performance domain-specific IE systems.
%U https://aclanthology.org/2025.alta-main.8/
%P 101-111
Markdown (Informal)
[An LLM-based Framework for Domain-Specific Information Extraction: A Case Study in Computer Science and Chemistry](https://aclanthology.org/2025.alta-main.8/) (Gu et al., ALTA 2025)
ACL
Xungang Gu, Yangjie Tian, Ning Li, Meng Liu, Ruohua Xu, He Zhang, Hanqiu Liu, Yongpan Sheng, and Ming Liu. 2025. An LLM-based Framework for Domain-Specific Information Extraction: A Case Study in Computer Science and Chemistry. In Proceedings of the 23rd Annual Workshop of the Australasian Language Technology Association, pages 101–111, Sydney, Australia. Association for Computational Linguistics.