@comment{Språkbanken Text is a part of Språkbanken. BibTeX export of publication list.}

@inProceedings{johansson-etal-2016-multi-233140,
	title        = {A Multi-domain Corpus of {Swedish} Word Sense Annotation},
	abstract     = {We describe the word sense annotation layer in Eukalyptus, a freely available five-domain corpus of contemporary Swedish with several annotation layers. The annotation uses the SALDO lexicon to define the sense inventory, and allows word sense annotation of compound segments and multiword units. We give an overview of the new annotation tool developed for this project, and finally present an analysis of the inter-annotator agreement between two annotators.},
	booktitle    = {10th edition of the Language Resources and Evaluation Conference, 23-28 May 2016, Portorož (Slovenia)},
	author       = {Johansson, Richard and Adesam, Yvonne and Bouma, Gerlof and Hedberg, Karin},
	year         = {2016},
	publisher    = {European Language Resources Association},
	ISBN         = {978-2-9517408-9-1},
}

@inProceedings{adouane-johansson-2016-gulf-242243,
	title        = {{Gulf Arabic} Resource Building for Sentiment Analysis},
	abstract     = {This paper deals with building linguistic resources for Gulf Arabic, one of the Arabic variations, for sentiment analysis task using machine learning. To our knowledge, no previous works were done for Gulf Arabic sentiment analysis despite the fact that it is present in different online platforms. Hence, the first challenge is the absence of annotated data and sentiment lexicons. To fill this gap, we created these two main linguistic resources. Then we conducted different experiments: use Naive Bayes classifier without any lexicon; add a sentiment lexicon designed basically for MSA; use only the compiled Gulf Arabic sentiment lexicon and finally use both MSA and Gulf Arabic sentiment lexicons. The Gulf Arabic lexicon gives a good improvement of the classifier accuracy (90.54 %) over a baseline that does not use the lexicon (82.81%), while the MSA lexicon causes the accuracy to drop to (76.83%). Moreover, mixing MSA and Gulf Arabic lexicons causes the accuracy to drop to (84.94%) compared to using only Gulf Arabic lexicon. This indicates that it is useless to use MSA resources to deal with Gulf Arabic due to the considerable differences and conflicting structures between these two languages.},
	booktitle    = {Proceedings of the Language Resources and Evaluation Conference (LREC), 23-28 May 2016, Portorož, Slovenia},
	author       = {Adouane, Wafia and Johansson, Richard},
	year         = {2016},
	publisher    = {European Language Resources Association},
	ISBN         = {978-2-9517408-9-1},
}

@inProceedings{ehrlemark-etal-2016-retrieving-242241,
	title        = {Retrieving Occurrences of Grammatical Constructions},
	abstract     = {Finding authentic examples of grammatical constructions is central in constructionist approaches to linguistics, language processing, and second language learning. In this paper, we address this problem as an information retrieval (IR) task. To facilitate research in this area, we built a benchmark collection by annotating the occurrences of six constructions in a Swedish corpus. Furthermore, we implemented a simple and flexible retrieval system for finding construction occurrences, in which the user specifies a ranking function using lexical-semantic similarities (lexicon-based or distributional). The system was evaluated using standard IR metrics on the new benchmark, and we saw that lexical-semantical rerankers improve significantly over a purely surface-oriented system, but must be carefully tailored for each individual construction.},
	booktitle    = {Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers, December 11--17, Osaka, Japan},
	author       = {Ehrlemark, Anna and Johansson, Richard and Lyngfelt, Benjamin},
	year         = {2016},
	ISBN         = {978-4-87974-702-0},
}

@inProceedings{nietopina-johansson-2016-embedding-241139,
	title        = {Embedding Senses for Efficient Graph-based Word Sense Disambiguation},
	abstract     = {We propose a simple graph-based method for word sense disambiguation (WSD) where sense and context embeddings are constructed by applying the Skip-gram method to random walks over the sense graph. We used this method to build a WSD system for Swedish using the SALDO lexicon, and evaluated it on six different annotated test sets. In all cases, our system was several orders of magnitude faster than a state-of-the-art PageRank-based system, while outperforming a random baseline soundly.},
	booktitle    = {Proceedings of {TextGraphs-10}: the Workshop on Graph-based Methods for Natural Language Processing},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2016},
	publisher    = {Association for Computational Linguistics},
}

@inProceedings{adouane-etal-2016-romanized-255457,
	title        = {Romanized {Arabic} and {Berber} Detection Using Prediction by Partial Matching and Dictionary Methods},
	abstract     = {Arabic is one of the Semitic languages written in Arabic script in its standard form. However, the recent rise of social media and new technologies has contributed considerably to the emergence of a new form of Arabic, namely Arabic written in Latin scripts, often called Romanized Arabic or Arabizi. While Romanized Arabic is an informal language, Berber or Tamazight uses Latin script in its standard form with some orthography differences depending on the country it is used in. Both these languages are under-resourced and unknown to the state-of-the-art language identifiers. In this paper, we present a language automatic identifier for both Romanized Arabic and Romanized Berber. We also describe the built linguistic resources (large dataset and lexicons) including a wide range of Arabic dialects (Algerian, Egyptian, Gulf, Iraqi, Levantine, Moroccan and Tunisian dialects) as well as the most popular Berber varieties (Kabyle, Tashelhit, Tarifit, Tachawit and Tamzabit). We use the Prediction by Partial Matching (PPM) and dictionary-based methods. The methods reach a macro-average F-Measure of 98.74% and 97.60% respectively.},
	booktitle    = {2016 {IEEE/ACS} 13th International Conference of Computer Systems and Applications ({AICCSA})},
	author       = {Adouane, Wafia and Semmar, Nasredine and Johansson, Richard},
	year         = {2016},
	ISBN         = {978-1-5090-4320-0},
}

@inProceedings{adouane-etal-2016-arabicized-252492,
	title        = {Arabicized and {Romanized} {Berber} Automatic Identification},
	abstract     = {We present an automatic language identification tool for both Arabicized Berber (Berber written in the Arabic script) and Romanized Berber (Berber written in the Latin script). The focus is on short texts (social media content). We use supervised machine learning method with character and word-based n-gram models as features. We also describe the corpora used in this paper. For both Arabicized and Romanized Berber, character-based 5-grams score the best giving an F-score of 99.50%.},
	booktitle    = {Proceedings of {TICAM} 2016},
	author       = {Adouane, Wafia and Semmar, Nasredine and Johansson, Richard},
	year         = {2016},
	publisher    = {IRCAM},
	address      = {Morocco},
}

@inProceedings{adouane-etal-2016-romanized-246849,
	title        = {Romanized {Berber} and {Romanized} {Arabic} Automatic Language Identification Using Machine Learning},
	abstract     = {The identification of the language of text/speech input is the first step to be able to properly do any language-dependent natural language processing. The task is called Automatic Language Identification (ALI). Being a well-studied field since early 1960's, various methods have been applied to many standard languages. The ALI standard methods require datasets for training and use character/word-based n-gram models. However, social media and new technologies have contributed to the rise of informal and minority languages on the Web. The state-of-the-art automatic language identifiers fail to properly identify many of them. Romanized Arabic (RA) and Romanized Berber (RB) are cases of these informal languages which are under-resourced. The goal of this paper is twofold: detect RA and RB, at a document level, as separate languages and distinguish between them as they coexist in North Africa. We consider the task as a classification problem and use supervised machine learning to solve it. For both languages, character-based 5-grams combined with additional lexicons score the best, F-score of 99.75% and 97.77% for RB and RA respectively.},
	booktitle    = {Proceedings of the Third Workshop on {NLP} for Similar Languages, Varieties and Dialects, December 12, 2016, Osaka, Japan},
	pages        = {53--61},
	author       = {Adouane, Wafia and Semmar, Nasredine and Johansson, Richard},
	year         = {2016},
	publisher    = {Association for Computational Linguistics},
}

@inProceedings{nietopina-johansson-2016-benchmarking-251412,
	title        = {Benchmarking Word Sense Disambiguation Systems for {Swedish}},
	abstract     = {We compare several word sense disambiguation systems for Swedish and evaluate them on seven different sense-annotated corpora. Our results show that unsupervised systems beat a random baseline, but generally do not outperform a first-sense baseline considerably. On a lexical-sample dataset that allows us to train a supervised system, the unsupervised disambiguators are strongly outperformed by the supervised one.},
	booktitle    = {The Sixth Swedish Language Technology Conference},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2016},
}

@inProceedings{adouane-etal-2016-asirem-246853,
	title        = {{ASIREM} Participation at the Discriminating Similar Languages Shared Task 2016},
	booktitle    = {Proceedings of the Third Workshop on {NLP} for Similar Languages, Varieties and Dialects, December 12, Osaka, Japan},
	pages        = {163--169},
	author       = {Adouane, Wafia and Semmar, Nasredine and Johansson, Richard},
	year         = {2016},
	publisher    = {Association for Computational Linguistics},
}

@inProceedings{adouane-etal-2016-automatic-246765,
	title        = {Automatic Detection of {Arabicized} {Berber} and {Arabic} Varieties},
	abstract     = {Automatic Language Identification (ALI) is the detection of the natural language of an input text by a machine. It is the first necessary step to do any language-dependent natural language processing task. Various methods have been successfully applied to a wide range of languages, and the state-of-the-art automatic language identifiers are mainly based on character n-gram models trained on huge corpora. However, there are many languages which are not yet automatically processed, for instance minority and informal languages. Many of these languages are only spoken and do not exist in a written format. Social media platforms and new technologies have facilitated the emergence of written format for these spoken languages based on pronunciation. The latter are not well represented on the Web, commonly referred to as under-resourced languages, and the current available ALI tools fail to properly recognize them. In this paper, we revisit the problem of ALI with the focus on Arabicized Berber and dialectal Arabic short texts. We introduce new resources and evaluate the existing methods. The results show that machine learning models combined with lexicons are well suited for detecting Arabicized Berber and different Arabic varieties and distinguishing between them, giving a macro-average F-score of 92.94%.},
	booktitle    = {Proceedings of the Third Workshop on {NLP} for Similar Languages, Varieties and Dialects, December 12, Osaka, Japan},
	pages        = {63--72},
	author       = {Adouane, Wafia and Semmar, Nasredine and Johansson, Richard and Bobicev, Victoria},
	year         = {2016},
	publisher    = {Association for Computational Linguistics},
}