BibTeX

@misc{ljunglof-etal-2021-selected-306645,
	title        = {Selected contributions from the Eighth Swedish Language Technology Conference (SLTC-2020), 25-27 November 2020},
	abstract     = {Selected extended papers from the Eighth Swedish Language Technology Conference (SLTC-2020), held 25-27 November 2020 in Gothenburg and online.},
	author       = {Ljunglöf, Peter and Dobnik, Simon and Johansson, Richard},
	year         = {2021},
	publisher    = {Linköping University Electronic Press},
	address      = {Linköping, Sweden},
	ISBN         = {978-91-7929-031-3},
}

@inproceedings{norlund-etal-2021-transferring-309548,
	title        = {Transferring Knowledge from Vision to Language: How to Achieve it and how to Measure it?},
	abstract     = {Large language models are known to suffer from the hallucination problem in that they are prone to output statements that are false or inconsistent, indicating a lack of knowledge. A proposed solution to this is to provide the model with additional data modalities that complements the knowledge obtained through text. We investigate the use of visual data to complement the knowledge of large language models by proposing a method for evaluating visual knowledge transfer to text for uni- or multimodal language models. The method is based on two steps, 1) a novel task querying for knowledge of memory colors, i.e. typical colors of well-known objects, and 2) filtering of model training data to clearly separate knowledge contributions. Additionally, we introduce a model architecture that involves a visual imagination step and evaluate it with our proposed method. We find that our method can successfully be used to measure visual knowledge transfer capabilities in models and that our novel model architecture shows promising results for leveraging multimodal knowledge in a unimodal setting.},
	booktitle    = {Proceedings of the Fourth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, Punta Cana, Dominican Republic},
	pages        = {149–162},
	author       = {Norlund, Tobias and Hagström, Lovisa and Johansson, Richard},
	year         = {2021},
	publisher    = {Association for Computational Linguistics},
}

@incollection{johansson-etal-2021-semantic-310775,
	title        = {Semantic Role Labeling},
	booktitle    = {The Swedish FrameNet++. Harmonization, integration, method development and practical language technology applications},
	editor       = {Dana Dannélls and Lars Borin and Karin Friberg Heppin},
	author       = {Johansson, Richard and Friberg Heppin, Karin and Kokkinakis, Dimitrios},
	year         = {2021},
	publisher    = {John Benjamins Publishing Company},
	address      = {Amsterdam / Philadelphia},
	ISBN         = {978-90-272-5848-9},
	pages        = {264–280},
}

@incollection{johansson-2021-resource-310770,
	title        = {NLP for Resource Building},
	booktitle    = {The Swedish FrameNet++. Harmonization, integration, method development and practical language technology applications},
	author       = {Johansson, Richard},
	year         = {2021},
	publisher    = {John Benjamins Publishing Company},
	address      = {Amsterdam / Philadelphia},
	ISBN         = {978-90-272-5848-9},
	pages        = {169–190},
}

@inproceedings{hagstrom-johansson-2021-knowledge-305832,
	title        = {Knowledge Distillation for Swedish NER models: A Search for Performance and Efficiency},
	abstract     = {The current recipe for better model performance within NLP is to increase model size and training data. While it gives us models with increasingly impressive results, it also makes it more difficult to train and deploy state-of-the-art models for NLP due to increasing computational costs. Model compression is a field of research that aims to alleviate this problem. The field encompasses different methods that aim to preserve the performance of a model while decreasing the size of it. One such method is knowledge distillation. In this article, we investigate the effect of knowledge distillation for named entity recognition models in Swedish. We show that while some sequence tagging models benefit from knowledge distillation, not all models do. This prompts us to ask questions about in which situations and for which models knowledge distillation is beneficial. We also reason about the effect of knowledge distillation on computational costs.},
	booktitle    = {Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa 2021), Reykjavík, Iceland},
	pages        = {124–134},
	author       = {Hagström, Lovisa and Johansson, Richard},
	year         = {2021},
	publisher    = {Linköping University Electronic Press},
	address      = {Linköping},
	ISBN         = {978-91-7929-614-8},
}