BibTeX

@book{lindahl-2025-argumentation-358528,
	title        = {Argumentation and agreement: Annotating and evaluating Swedish corpora for argumentation mining},
	abstract     = {Argumentation occurs in all parts of life and is therefore studied across disciplines. In natural language processing, the field of argumentation mining aims to develop computational tools that automatically analyze and evaluate argumentation. Such tools have many uses, from automatically grading essays to identifying fallacies. In order to build such tools, annotated data is essential for both training and evaluation, especially with large language models (LLMs). Creating annotated datasets, however, presents significant challenges, not only because of the complexity of argumentation but also because of methodological questions such as how to represent argumentation and how to evaluate annotation quality.

To create more resources and to investigate these challenges, I explore several approaches to argumentation annotation in this thesis. To this end, I also present a comprehensive survey of argumentation annotation. Three annotation approaches of varying complexity are explored: argumentation schemes applied to editorials, argumentative spans applied to online forums and political debates, and attitude annotation applied to tweets. The datasets thus represent a wide variety of genres and approaches. Attitude annotation in tweets showed the highest agreement among annotators, while annotating editorials with argumentation schemes was the most challenging.

In the evaluation of the annotations, several types of disagreement were identified. Most saliently, disagreement often occurred in cases where multiple interpretations were possible, challenging agreement as the primary measure of quality. These findings show the need for more comprehensive evaluation approaches. I therefore demonstrate ways to evaluate annotations beyond single agreement measures: analyzing agreement from multiple angles, investigating annotator patterns, and manually inspecting disagreements.

To further explore argumentation annotation, I investigate how two different LLMs annotate argumentation compared to human annotators, finding that while the models exhibit annotation behavior similar to that of the human annotators, with similar agreement levels and disagreement patterns, the models agree more with each other than the human annotators do.},
	author       = {Lindahl, Anna},
	year         = {2025},
	ISBN         = {978-91-8115-302-6},
}

@inProceedings{munozsanchez-etal-2025-trying-352646,
	title        = {Are You Trying to Convince Me or Are You Trying to Deceive Me? Using Argumentation Types to Identify Deceptive News},
	abstract     = {From the perspective of argumentation, the way we relay factual information differs from the way we present deceptive information as truth. In this paper, we explore whether these differences can be exploited to detect deceptive political news in English. We do this by training a model to detect different kinds of argumentation in online news text. We use sentence embeddings extracted from an argumentation type classification model as features for a deceptive news classifier. This deceptive news classifier leverages the sequence of argumentation types within an article to determine whether the article is credible or deceptive. Our approach outperforms other state-of-the-art models while having lower variance. Finally, we use the output of our argumentation model to analyze the differences between credible and deceptive news based on the distribution of argumentation types across the articles. The results of this analysis indicate that credible political news presents statements supported by a variety of argumentation types, while deceptive news relies on anecdotes and testimonials.},
	booktitle    = {Proceedings of the 9th Workshop on Online Abuse and Harms (WOAH)},
	author       = {Muñoz Sánchez, Ricardo and Francis, Emilie and Lindahl, Anna},
	year         = {2025},
	publisher    = {Association for Computational Linguistics (ACL)},
	address      = {Vienna, Austria},
	ISBN         = {979-8-89176-105-6},
	pages        = {355--372},
}