@inproceedings{daza-2026-hands-approach,
    title = {A Hands-on Approach to {NLP} Fundamentals for External Domain Experts in the {LLM} Era},
    author = {Daza, Angel},
    editor = {A{\ss}enmacher, Matthias and
      Biester, Laura and
      Borg, Claudia and
      Kov{\'a}cs, Gy{\"o}rgy and
      Mieskes, Margot and
      Serrano, Sofia},
    booktitle = {Proceedings of the Seventh Workshop on Teaching Natural Language Processing ({TeachNLP} 2026)},
    month = mar,
    year = {2026},
    address = {Rabat, Morocco},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2026.teachingnlp-1.7/},
    pages = {37--40},
    isbn = {979-8-89176-375-3},
    abstract = {With the advent of Large Language Models (LLMs) researchers outside the Natural Language Processing (NLP) field are interested in learning how to process textual data for their own domain research goals. They are particularly motivated to start experimenting directly with LLMs, implicitly neglecting the large amount of accumulated knowledge that NLP has to offer them. In this text, we briefly share our new lesson materials that aim to show aspiring practitioners the strong connection between NLP fundamentals and LLMs, in the form of a two-day workshop. Our training material is mainly aimed at graduate students outside the NLP sphere who have basic technical knowledge and wish to start working with text, is fully open source and available online.}
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="daza-2026-hands-approach">
<titleInfo>
<title>A Hands-on Approach to NLP Fundamentals for External Domain Experts in the LLM Era</title>
</titleInfo>
<name type="personal">
<namePart type="given">Angel</namePart>
<namePart type="family">Daza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2026-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Seventh Workshop on Teaching Natural Language Processing (TeachNLP 2026)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Matthias</namePart>
<namePart type="family">Aßenmacher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Laura</namePart>
<namePart type="family">Biester</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claudia</namePart>
<namePart type="family">Borg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">György</namePart>
<namePart type="family">Kovács</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margot</namePart>
<namePart type="family">Mieskes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sofia</namePart>
<namePart type="family">Serrano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Rabat, Morocco</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-375-3</identifier>
</relatedItem>
<abstract>With the advent of Large Language Models (LLMs) researchers outside the Natural Language Processing (NLP) field are interested in learning how to process textual data for their own domain research goals. They are particularly motivated to start experimenting directly with LLMs, implicitly neglecting the large amount of accumulated knowledge that NLP has to offer them. In this text, we briefly share our new lesson materials that aim to show aspiring practitioners the strong connection between NLP fundamentals and LLMs, in the form of a two-day workshop. Our training material is mainly aimed at graduate students outside the NLP sphere who have basic technical knowledge and wish to start working with text, is fully open source and available online.</abstract>
<identifier type="citekey">daza-2026-hands-approach</identifier>
<location>
<url>https://aclanthology.org/2026.teachingnlp-1.7/</url>
</location>
<part>
<date>2026-03</date>
<extent unit="page">
<start>37</start>
<end>40</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Hands-on Approach to NLP Fundamentals for External Domain Experts in the LLM Era
%A Daza, Angel
%Y Aßenmacher, Matthias
%Y Biester, Laura
%Y Borg, Claudia
%Y Kovács, György
%Y Mieskes, Margot
%Y Serrano, Sofia
%S Proceedings of the Seventh Workshop on Teaching Natural Language Processing (TeachNLP 2026)
%D 2026
%8 March
%I Association for Computational Linguistics
%C Rabat, Morocco
%@ 979-8-89176-375-3
%F daza-2026-hands-approach
%X With the advent of Large Language Models (LLMs) researchers outside the Natural Language Processing (NLP) field are interested in learning how to process textual data for their own domain research goals. They are particularly motivated to start experimenting directly with LLMs, implicitly neglecting the large amount of accumulated knowledge that NLP has to offer them. In this text, we briefly share our new lesson materials that aim to show aspiring practitioners the strong connection between NLP fundamentals and LLMs, in the form of a two-day workshop. Our training material is mainly aimed at graduate students outside the NLP sphere who have basic technical knowledge and wish to start working with text, is fully open source and available online.
%U https://aclanthology.org/2026.teachingnlp-1.7/
%P 37-40
Markdown (Informal)
[A Hands-on Approach to NLP Fundamentals for External Domain Experts in the LLM Era](https://aclanthology.org/2026.teachingnlp-1.7/) (Daza, TeachingNLP 2026)
ACL