@inproceedings{dannells-etal-2024-transformer-338708,
  title     = {Transformer-based {Swedish} Semantic Role Labeling through Transfer Learning},
  abstract  = {Semantic Role Labeling (SRL) is a task in natural language understanding where the goal is to extract semantic roles for a given sentence. English SRL has achieved state-of-the-art performance using Transformer techniques and supervised learning. However, this technique is not a viable choice for smaller languages like Swedish due to the limited amount of training data. In this paper, we present the first effort in building a Transformer-based SRL system for Swedish by exploring multilingual and cross-lingual transfer learning methods and leveraging the Swedish FrameNet resource. We demonstrate that multilingual transfer learning outperforms two different cross-lingual transfer models. We also found some differences between frames in FrameNet that can either hinder or enhance the model’s performance. The resulting end-to-end model is freely available and will be made accessible through Språkbanken Text’s research infrastructure.},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), 20-25 May, 2024, Torino, Italia},
  author    = {Dannélls, Dana and Johansson, Richard and Buhr, Lucy Yang},
  year      = {2024},
  publisher = {ELRA and ICCL},
  address   = {Turin, Italy},
  isbn      = {978-2-493814-10-4},
}

@inproceedings{lofgren-dannells-2024-post-336065,
  title     = {Post-{OCR} Correction of Digitized {Swedish} Newspapers with {ByT5}},
  abstract  = {Many collections of digitized newspapers suffer from poor OCR quality, which impacts readability, information retrieval, and analysis of the material. Errors in OCR output can be reduced by applying machine translation models to translate it into a corrected version. Although transformer models show promising results in post-OCR correction and related tasks in other languages, they have not yet been explored for correcting OCR errors in Swedish texts. This paper presents a post-OCR correction model for Swedish 19th to 21st century newspapers based on the pre-trained transformer model ByT5. Three versions of the model were trained on different mixes of training data. The best model, which achieved a 36\% reduction in CER, is made freely available and will be integrated into the automatic processing pipeline of Språkbanken Text, a Swedish language technology infrastructure containing modern and historical written data.},
  booktitle = {Proceedings of the 8th Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature (LaTeCH-CLfL 2024), March 22, 2024, Malta},
  author    = {Löfgren, Viktoria and Dannélls, Dana},
  year      = {2024},
  publisher = {Association for Computational Linguistics},
  address   = {East Stroudsburg, Pennsylvania, United States},
  isbn      = {979-8-89176-069-1},
}