Hoppa till huvudinnehåll
Språkbanken Text är en avdelning inom Språkbanken.

BibTeX

@inproceedings{berdicevskis-etal-2023-superlim-331445,
	title        = {{Superlim}: A {Swedish} Language Understanding Evaluation Benchmark},
	booktitle    = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, December 6-10, 2023, Singapore / Houda Bouamor, Juan Pino, Kalika Bali (Editors)},
	author       = {Berdicevskis, Aleksandrs and Bouma, Gerlof and Kurtz, Robin and Morger, Felix and Öhman, Joey and Adesam, Yvonne and Borin, Lars and Dannélls, Dana and Forsberg, Markus and Isbister, Tim and Lindahl, Anna and Malmsten, Martin and Rekathati, Faton and Sahlgren, Magnus and Volodina, Elena and Börjeson, Love and Hengchen, Simon and Tahmasebi, Nina},
	year         = {2023},
	publisher    = {Association for Computational Linguistics},
	address      = {Stroudsburg, PA},
	isbn         = {979-8-89176-060-8},
	pages        = {8137--8153},
}

@inproceedings{lindahl-2024-disagreement-341074,
	title        = {Disagreement in Argumentation Annotation},
	abstract     = {Disagreement, perspective or error? There is a growing discussion against the idea of a unified ground truth in annotated data, as well as the usefulness of such a ground truth and resulting gold standard. In data perspectivism, this issue is exemplified with tasks such as hate speech or sentiment classification in which annotators’ different perspectives are important to include. In this paper we turn to argumentation, a related field which has had less focus from this point of view. Argumentation is difficult to annotate for several reasons, from the more practical parts of deciding where the argumentation begins and ends to questions of how argumentation is defined and what it consists of. Learning more about disagreement is therefore important in order to improve argument annotation and to better utilize argument annotated data. Because of this, we examine disagreement in two corpora annotated with argumentation both manually and computationally. We find that disagreement is often not because of annotation errors or mistakes but due to the possibility of multiple possible interpretations. More specifically, these interpretations can be over boundaries, label or existence of argumentation. These results emphasize the need for more thorough analysis of disagreement in data, outside of the more common inter-annotator agreement measures.},
	booktitle    = {3rd Workshop on Perspectivist Approaches to {NLP}, {NLPerspectives} 2024 at {LREC-COLING} 2024 - Workshop Proceedings},
	author       = {Lindahl, Anna},
	year         = {2024},
	isbn         = {9782493814234},
}

@article{lindahl-borin-2024-annotation-333043,
	title        = {Annotation for Computational Argumentation Analysis: Issues and Perspectives},
	abstract     = {Argumentation has long been studied in a number of disciplines, including several branches of linguistics. In recent years, computational processing of argumentation has been added to the list, reflecting a general interest from the field of natural language processing (NLP) in building natural language understanding systems for increasingly intricate language phenomena. Computational argumentation analysis – referred to as argumentation mining in the NLP literature – requires large amounts of real-world text with manually analyzed argumentation. This process is known as annotation in the NLP literature and such annotated datasets are used both as “gold standards” for assessing the quality of NLP applications and as training data for the machine learning algorithms underlying most state of the art approaches to NLP. Argumentation annotation turns out to be complex, both because argumentation can be complex in itself and because it does not come across as a unitary phenomenon in the literature. In this survey we review how argumentation has been studied in other fields, how it has been annotated in NLP and what has been achieved so far. We conclude with describing some important current and future issues to be resolved.},
	journal      = {Language and Linguistics Compass},
	author       = {Lindahl, Anna and Borin, Lars},
	year         = {2024},
	volume       = {18},
	number       = {1},
}

@incollection{lindahl-rodveneide-2022-argumentative-325260,
	title        = {Argumentative Language Resources at {Språkbanken Text}},
	abstract     = {Språkbanken Text at the University of Gothenburg is a CLARIN B-centre providing language resources in Swedish, as well as tools to use them, for a wide range of disciplines. In 2017, we began exploring the field of argument mining – the process of automatically identifying and classifying arguments in text – partly aimed at establishing language resources and tools for argument analysis and mining in Swedish.},
	booktitle    = {{CLARIN}: The Infrastructure for Language Resources, eds. Darja Fišer & Andreas Witt},
	author       = {Lindahl, Anna and Rødven-Eide, Stian},
	year         = {2022},
	publisher    = {De Gruyter},
	address      = {Berlin, Boston},
	isbn         = {9783110767346},
	pages        = {667--690},
}

@inproceedings{lindahl-2022-machines-322689,
	title        = {Do Machines Dream of Artificial Agreement?},
	abstract     = {In this paper the (assumed) inconsistency between F1-scores and annotator agreement measures is discussed. This is exemplified in five corpora from the field of argumentation mining. High agreement is important in most annotation tasks and also often deemed important for an annotated dataset to be useful for machine learning. However, depending on the annotation task, achieving high agreement is not always easy. This is especially true in the field of argumentation mining, because argumentation can be complex as well as implicit. There are also many different models of argumentation, which can be seen in the increasing number of argumentation annotated corpora. Many of these reach moderate agreement but are still used in machine learning tasks, reaching high F1-score. In this paper we describe five corpora, in particular how they have been created and used, to see how they have handled disagreement. We find that agreement can be raised post-production, but that more discussion regarding evaluating and calculating agreement is needed. We conclude that standardisation of the models and the evaluation methods could help such discussions.},
	booktitle    = {Proceedings of the 18th Joint ACL - ISO Workshop on Interoperable Semantic Annotation within LREC2022, June 20, 2022, Marseille, France / Harry Bunt (Editor)},
	author       = {Lindahl, Anna},
	year         = {2022},
	publisher    = {European Language Resources Association},
	address      = {Marseille},
	isbn         = {979-10-95546-81-8},
}

@inproceedings{lindahl-2020-annotating-302453,
	title        = {Annotating Argumentation in {Swedish} Social Media},
	booktitle    = {Proceedings of the 7th Workshop on Argument Mining, Barcelona, Spain (Online), December 13, 2020.},
	author       = {Lindahl, Anna},
	year         = {2020},
	publisher    = {Association for Computational Linguistics},
	address      = {Stroudsburg, PA},
	isbn         = {978-1-952148-44-6},
}

@inproceedings{lindahl-etal-2019-towards-286588,
	title        = {Towards Assessing Argumentation Annotation -- A First Step},
	abstract     = {This paper presents a first attempt at using Walton’s argumentation schemes for annotating arguments in Swedish political text and assessing the feasibility of using this particular set of schemes with two linguistically trained annotators. The texts are not pre-annotated with argumentation structure beforehand. The results show that the annotators differ both in number of annotated arguments and selection of the conclusion and premises which make up the arguments. They also differ in their labeling of the schemes, but grouping the schemes increases their agreement. The outcome from this will be used to develop guidelines for future annotations.},
	booktitle    = {Proceedings of the 6th Workshop on Argument Mining, August 1, 2019, Florence, Italy / Benno Stein, Henning Wachsmuth (Editors)},
	author       = {Lindahl, Anna and Borin, Lars and Rouces, Jacobo},
	year         = {2019},
	publisher    = {Association for Computational Linguistics},
	address      = {Stroudsburg, PA},
	isbn         = {978-1-950737-33-8},
}