
BibTeX

@inProceedings{nietopina-johansson-2017-training-261938,
	title        = {Training Word Sense Embeddings With Lexicon-based Regularization},
	abstract     = {We propose to improve word sense embeddings by enriching an automatic corpus-based method with lexicographic data. Information from a lexicon is introduced into the learning algorithm’s objective function through a regularizer. The incorporation of lexicographic data yields embeddings that are able to reflect expert-defined word senses, while retaining the robustness, high quality, and coverage of automatic corpus-based methods. These properties are observed in a manual inspection of the semantic clusters that different degrees of regularizer strength create in the vector space. Moreover, we evaluate the sense embeddings in two downstream applications: word sense disambiguation and semantic frame prediction, where they outperform simpler approaches. Our results show that a corpus-based model balanced with lexicographic data learns better representations and improves their performance in downstream tasks.},
	booktitle    = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), Taipei, Taiwan, November 27 – December 1, 2017},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2017},
	publisher    = {Asian Federation of Natural Language Processing },
	ISBN         = {978-1-948087-00-1},
}
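
To make the regularization idea above concrete, here is a minimal sketch, assuming a toy lexicon and a negative-sampling skip-gram update; all names (lambda_reg, lexicon_neighbors, the vectors) are illustrative assumptions, not the paper's implementation.

```python
# Minimal sketch (NOT the paper's implementation): a skip-gram-style update for a
# sense vector, regularized towards the sense's neighbours in a lexicon such as SALDO.
# All names (lambda_reg, lexicon_neighbors, ...) are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(0)
dim = 50
sense_vecs = {s: rng.normal(scale=0.1, size=dim) for s in ["bank_1", "bank_2", "river_1", "money_1"]}
ctx_vecs = {w: rng.normal(scale=0.1, size=dim) for w in ["water", "loan", "shore", "interest"]}
lexicon_neighbors = {"bank_1": ["money_1"], "bank_2": ["river_1"]}  # assumed lexicon edges

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def update(sense, pos_ctx, neg_ctx, lr=0.05, lambda_reg=0.1):
    """One SGD step: negative-sampling skip-gram term plus lexicon regularizer."""
    s = sense_vecs[sense]
    grad = np.zeros_like(s)
    # skip-gram term with one positive and one negative context word
    for ctx, label in ((pos_ctx, 1.0), (neg_ctx, 0.0)):
        c = ctx_vecs[ctx]
        grad += (sigmoid(s @ c) - label) * c
    # regularizer: pull the sense vector towards its lexicon neighbours
    for nb in lexicon_neighbors.get(sense, []):
        grad += lambda_reg * (s - sense_vecs[nb])
    sense_vecs[sense] = s - lr * grad

update("bank_2", pos_ctx="water", neg_ctx="interest")
```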

@inProceedings{mogren-johansson-2017-character-256929,
	title        = {Character-based Recurrent Neural Networks for Morphological Relational Reasoning},
	abstract     = {We present a model for predicting word forms based on morphological relational reasoning with analogies. While previous work has explored tasks such as morphological inflection and reinflection, these models rely on an explicit enumeration of morphological features, which may not be available in all cases.

To address the task of predicting a word form given a demo relation (a pair of word forms) and a query word, we devise a character-based recurrent neural network architecture using three separate encoders and a decoder.

We also investigate a multitask learning setup, where the prediction of the relation type label is used as an auxiliary task. Our results show that the exact form can be predicted for English with an accuracy of 94.7%. For Swedish, which has a more complex morphology with more inflectional patterns for nouns and verbs, the accuracy is 89.3%. We also show that using the auxiliary task of learning the relation type speeds up convergence and improves the prediction accuracy for the word generation task.},
	booktitle    = {Proceedings of the First Workshop on Subword and Character Level Models in NLP},
	author       = {Mogren, Olof and Johansson, Richard},
	year         = {2017},
	publisher    = {Association for Computational Linguistics},
	address      = {Stroudsburg, PA, United States},
}
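
As a structural illustration of the architecture sketched in the abstract (three character-level encoders and a decoder), the following is a minimal PyTorch sketch; the hidden sizes, the omitted auxiliary relation classifier, and all training details are assumptions and differ from the published model.

```python
# Structural sketch only, assuming PyTorch and GRU units; hyperparameters and details
# (attention, the auxiliary relation classifier, decoding strategy) are not the paper's.
import torch
import torch.nn as nn

class AnalogyModel(nn.Module):
    """Three character-level encoders (demo source, demo target, query) and a decoder."""
    def __init__(self, n_chars, emb_dim=32, hid_dim=64):
        super().__init__()
        self.embed = nn.Embedding(n_chars, emb_dim)
        self.enc_demo_src = nn.GRU(emb_dim, hid_dim, batch_first=True)
        self.enc_demo_tgt = nn.GRU(emb_dim, hid_dim, batch_first=True)
        self.enc_query = nn.GRU(emb_dim, hid_dim, batch_first=True)
        self.decoder = nn.GRU(emb_dim, 3 * hid_dim, batch_first=True)
        self.out = nn.Linear(3 * hid_dim, n_chars)

    def forward(self, demo_src, demo_tgt, query, decoder_in):
        # Encode the three character sequences and concatenate their final states.
        _, h1 = self.enc_demo_src(self.embed(demo_src))
        _, h2 = self.enc_demo_tgt(self.embed(demo_tgt))
        _, h3 = self.enc_query(self.embed(query))
        h0 = torch.cat([h1, h2, h3], dim=-1)          # (1, batch, 3 * hid_dim)
        dec_out, _ = self.decoder(self.embed(decoder_in), h0)
        return self.out(dec_out)                      # logits over characters

# Toy usage with a batch of 2 and random character ids (vocabulary of 30 symbols).
model = AnalogyModel(n_chars=30)
src = torch.randint(0, 30, (2, 5))
tgt = torch.randint(0, 30, (2, 6))
qry = torch.randint(0, 30, (2, 5))
dec = torch.randint(0, 30, (2, 7))
logits = model(src, tgt, qry, dec)   # shape: (2, 7, 30)
```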

@inProceedings{oepen-etal-2017-2017-264156,
	title        = {The 2017 Shared Task on Extrinsic Parser Evaluation. Towards a Reusable Community Infrastructure},
	abstract     = {The 2017 Shared Task on Extrinsic Parser Evaluation (EPE 2017) seeks to provide better estimates of the relative utility of different types of dependency representations for a variety of downstream applications that depend centrally on the analysis of grammatical structure. EPE 2017 defines a generalized notion of lexicalized syntactico-semantic dependency representations and provides a common interchange format to three state-of-the-art downstream applications, viz. biomedical event extraction, negation resolution, and fine-grained opinion analysis. As a first step towards building a generic and extensible infrastructure for extrinsic parser evaluation, the downstream applications have been generalized to support a broad range of diverse dependency representations (including divergent sentence and token boundaries) and to allow fully automated re-training and evaluation for a specific collection of parser outputs. Nine teams participated in EPE 2017, submitting 49 distinct runs that encompass many different families of dependency representations, distinct approaches to preprocessing and parsing, and various types and volumes of training data.},
	booktitle    = {Proceedings of the 2017 Shared Task on Extrinsic Parser Evaluation at the Fourth International Conference on Dependency Linguistics and the 15th International Conference on Parsing Technologies},
	author       = {Oepen, Stephan and Øvrelid, Lilja and Björne, Jari and Johansson, Richard and Lapponi, Emanuele and Ginter, Filip and Velldal, Erik},
	year         = {2017},
	publisher    = {Association for Computational Linguistics (ACL)},
	address      = {Stroudsburg, USA},
	ISBN         = {978-1-945626-74-6},
}

@inProceedings{johansson-2017-2017-264160,
	title        = {EPE 2017: The Trento–Gothenburg Opinion Extraction System},
	abstract     = {We give an overview of one of the three downstream systems in the Extrinsic Parser Evaluation shared task of 2017: the Trento–Gothenburg system for opinion extraction. We describe the modifications required to make the system agnostic to its input dependency representation, and discuss how the input affects the various submodules of the system. The results of the EPE shared task are presented and discussed, and to get a more detailed understanding of the effects of the dependencies we run two of the submodules separately. The results suggest that the module where the effects are strongest is the opinion holder extraction module, which can be explained by the fact that this module uses several dependency-based features. For the other modules, the effects are hard to measure.},
	booktitle    = {Proceedings of the 2017 Shared Task on Extrinsic Parser Evaluation at the Fourth International Conference on Dependency Linguistics and the 15th International Conference on Parsing Technologies},
	author       = {Johansson, Richard},
	year         = {2017},
	publisher    = {Association for Computational Linguistics (ACL) },
	address      = {Stroudsburg, USA},
	ISBN         = {978-1-945626-74-6 },
}

@inProceedings{ehrlemark-etal-2016-retrieving-242241,
	title        = {Retrieving Occurrences of Grammatical Constructions},
	abstract     = {Finding authentic examples of grammatical constructions is central in constructionist approaches to linguistics, language processing, and second language learning. In this paper, we address this problem as an information retrieval (IR) task. To facilitate research in this area, we built a benchmark collection by annotating the occurrences of six constructions in a Swedish corpus. Furthermore, we implemented a simple and flexible retrieval system for finding construction occurrences, in which the user specifies a ranking function using lexical-semantic similarities (lexicon-based or distributional). The system was evaluated using standard IR metrics on the new benchmark, and we saw that lexical-semantic rerankers improve significantly over a purely surface-oriented system, but must be carefully tailored for each individual construction.
},
	booktitle    = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, December 11–17, Osaka, Japan},
	author       = {Ehrlemark, Anna and Johansson, Richard and Lyngfelt, Benjamin},
	year         = {2016},
	ISBN         = {978-4-87974-702-0},
}
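
The ranking idea can be illustrated with a toy re-ranker, assuming precomputed word embeddings and a set of seed slot fillers; none of the names, data, or scoring choices below come from the paper's system.

```python
# Toy re-ranker sketch (not the system from the paper): candidate sentences that
# surface-match a construction pattern are re-scored by the distributional similarity
# of the word filling an open slot to a set of seed fillers. Vectors here are random
# stand-ins for real word embeddings.
import numpy as np

rng = np.random.default_rng(1)
vocab = ["springer", "läser", "promenerar", "äter"]
vectors = {w: rng.normal(size=20) for w in vocab}   # assumed embedding lookup

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

def slot_score(filler, seed_fillers):
    """Similarity of a candidate slot filler to the seed fillers of the construction."""
    return max(cosine(vectors[filler], vectors[s]) for s in seed_fillers)

candidates = [("hon springer till jobbet", "springer"),
              ("hon läser till sjuksköterska", "läser")]
seeds = ["promenerar", "äter"]   # assumed seed fillers for the open slot
ranked = sorted(candidates, key=lambda c: slot_score(c[1], seeds), reverse=True)
print(ranked)
```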

@inProceedings{nietopina-johansson-2016-embedding-241139,
	title        = {Embedding Senses for Efficient Graph-based Word Sense Disambiguation},
	abstract     = {We propose a simple graph-based method for word sense disambiguation (WSD) where sense and context embeddings are constructed by applying the Skip-gram method to random walks over the sense graph. We used this method to build a WSD system for Swedish using the SALDO lexicon, and evaluated it on six different annotated test sets. In all cases, our system was several orders of magnitude faster than a state-of-the-art PageRank-based system, while outperforming a random baseline soundly.},
	booktitle    = {Proceedings of TextGraphs-10: the Workshop on Graph-based Methods for Natural Language Processing},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2016},
	publisher    = {Association for Computational Linguistics},
}
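
The recipe in the abstract (Skip-gram applied to random walks over the sense graph) can be sketched as follows, using a toy graph in place of SALDO; networkx, gensim, and all parameter values are assumptions about one possible realization, not the paper's exact setup.

```python
# Sketch of the general recipe (toy data, not the SALDO graph or the exact setup):
# generate random walks over a sense graph and feed them to Skip-gram, so that
# the senses end up in a shared embedding space.
import random
import networkx as nx
from gensim.models import Word2Vec

# Toy sense graph: nodes are sense identifiers, edges are lexical-semantic relations.
G = nx.Graph()
G.add_edges_from([("bank..1", "pengar..1"), ("bank..2", "flod..1"),
                  ("flod..1", "vatten..1"), ("pengar..1", "valuta..1")])

def random_walks(graph, walks_per_node=10, walk_length=8, seed=0):
    rng = random.Random(seed)
    walks = []
    for _ in range(walks_per_node):
        for start in graph.nodes():
            walk = [start]
            while len(walk) < walk_length:
                neighbors = list(graph.neighbors(walk[-1]))
                walk.append(rng.choice(neighbors))
            walks.append(walk)
    return walks

# Treat each walk as a "sentence" and train Skip-gram (sg=1) on the walks.
model = Word2Vec(random_walks(G), vector_size=50, window=3, min_count=1, sg=1, epochs=5)
print(model.wv.most_similar("bank..2", topn=2))
```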

@inProceedings{johansson-etal-2016-multi-233140,
	title        = {A Multi-domain Corpus of Swedish Word Sense Annotation},
	abstract     = {We describe the word sense annotation layer in Eukalyptus, a freely available five-domain corpus of contemporary Swedish with several annotation layers. The annotation uses the SALDO lexicon to define the sense inventory, and allows word sense annotation of compound segments and multiword units. We give an overview of the new annotation tool developed for this project, and finally present an analysis of the inter-annotator agreement between two annotators.
},
	booktitle    = {10th edition of the Language Resources and Evaluation Conference, 23-28 May 2016, Portorož (Slovenia)},
	author       = {Johansson, Richard and Adesam, Yvonne and Bouma, Gerlof and Hedberg, Karin},
	year         = {2016},
	publisher    = {European Language Resources Association},
	ISBN         = {978-2-9517408-9-1},
}

@inProceedings{nietopina-johansson-2015-simple-222611,
	title        = {A Simple and Efficient Method to Generate Word Sense Representations},
	abstract     = {Distributed representations of words have boosted the performance of many Natural Language Processing tasks. However, usually only one representation per word is obtained, not acknowledging the fact that some words have multiple meanings. This has a negative effect on the individual word representations and the language model as a whole. In this paper we present a simple model that enables recent techniques for building word vectors to represent distinct senses of polysemic words. In our assessment of this model we show that it is able to effectively discriminate between words’ senses and to do so in a computationally efficient manner.},
	booktitle    = {Proceedings of International Conference in Recent Advances in Natural Language Processing},
	editor       = {Galia Angelova and Kalina Bontcheva and Ruslan Mitkov},
	address      = {Hissar, Bulgaria},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2015},
	pages        = {465--472},
}

@inProceedings{ghanimifard-johansson-2015-enriching-222749,
	title        = {Enriching Word-sense Embeddings with Translational Context},
	abstract     = {Vector-space models derived from corpora are an effective way to learn a representation of word meaning directly from data, and these models have many uses in practical applications. A number of unsupervised approaches have been proposed to automatically learn representations of word senses directly from corpora, but since these methods use no information but the words themselves, they sometimes miss distinctions that could be possible to make if more information were available.

In this paper, we present a general framework that we call context enrichment that incorporates external information during the training of multi-sense vector-space models. Our approach is agnostic as to which external signal is used to enrich the context, but in this work we consider the use of translations as the source of enrichment. We evaluated the models trained using the translation-enriched context using several similarity benchmarks and a word analogy test set. In all our evaluations, the enriched model outperformed the purely word-based baseline soundly.},
	booktitle    = {Proceedings of Recent Advances in Natural Language Processing},
	editor       = {Galia Angelova and Kalina Bontcheva and Ruslan Mitkov},
	address      = {Hissar, Bulgaria},
	author       = {Ghanimifard, Mehdi and Johansson, Richard},
	year         = {2015},
	pages        = {208--215},
}

@inProceedings{johansson-nietopina-2015-combining-216865,
	title        = {Combining Relational and Distributional Knowledge for Word Sense Disambiguation},
	abstract     = {We present a new approach to word sense disambiguation derived from recent ideas in distributional semantics. The input to the algorithm is a large unlabeled corpus and a graph describing how senses are related; no sense-annotated corpus is needed. The fundamental idea is to embed meaning representations of senses in the same continuous-valued vector space as the representations of words. In this way, the knowledge encoded in the lexical resource is combined with the information derived by the distributional methods. Once this step has been carried out, the sense representations can be plugged back into e.g. the skip-gram model, which allows us to compute scores for the different possible senses of a word in a given context.

We evaluated the new word sense disambiguation system on two Swedish test sets annotated with senses defined by the SALDO lexical resource. In both evaluations, our system soundly outperformed random and first-sense baselines. Its accuracy was slightly above that of a well-known graph-based system, while being computationally much more efficient.},
	booktitle    = {Proceedings of the 20th Nordic Conference of Computational Linguistics, May 12-13, Vilnius, Lithuania. Linköping Electronic Conference Proceedings 109, Linköping University Electronic Press.},
	author       = {Johansson, Richard and Nieto Piña, Luis},
	year         = {2015},
	ISBN         = {978-91-7519-098-3},
	pages        = {69--78},
}
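
The scoring step mentioned in the abstract (plugging sense vectors back into a skip-gram-style model) might look roughly like the sketch below; the vectors, sense identifiers, and helper names are random stand-ins and assumptions, not output of the actual system.

```python
# Minimal disambiguation sketch: once sense vectors live in the same space as the
# skip-gram context vectors, each candidate sense can be scored against the observed
# context words and the best-scoring sense chosen.
import numpy as np

rng = np.random.default_rng(2)
dim = 50
sense_vectors = {"rock..1": rng.normal(size=dim), "rock..2": rng.normal(size=dim)}
context_vectors = {w: rng.normal(size=dim) for w in ["guitar", "band", "stone", "cliff"]}

def log_sigmoid(x):
    # log(sigmoid(x)) computed stably as -log(1 + exp(-x))
    return -np.logaddexp(0.0, -x)

def score_sense(sense_vec, context_words):
    """Skip-gram-style score: sum of log-probabilities of the context given the sense."""
    return sum(log_sigmoid(sense_vec @ context_vectors[w]) for w in context_words)

def disambiguate(candidate_senses, context_words):
    return max(candidate_senses, key=lambda s: score_sense(sense_vectors[s], context_words))

print(disambiguate(["rock..1", "rock..2"], ["guitar", "band"]))
```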

@inProceedings{johansson-nietopina-2015-embedding-217863,
	title        = {Embedding a Semantic Network in a Word Space},
	abstract     = {We present a framework for using continuous-space vector representations of word meaning to derive new vectors representing the meaning of senses listed in a semantic network. It is a post-processing approach that can be applied to several types of word vector representations. It uses two ideas: first, that vectors for polysemous words can be decomposed into a convex combination of sense vectors; secondly, that the vector for a sense is kept similar to those of its neighbors in the network. This leads to a constrained optimization problem, and we present an approximation for the case when the distance function is the squared Euclidean.

We applied this algorithm on a Swedish semantic network, and we evaluate the quality of the resulting sense representations extrinsically by showing that they give large improvements when used in a classifier that creates lexical units for FrameNet frames.},
	booktitle    = {Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Denver, United States, May 31 – June 5, 2015},
	author       = {Johansson, Richard and Nieto Piña, Luis},
	year         = {2015},
	ISBN         = {978-1-941643-49-5},
	pages        = {1428--1433},
}
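
A toy rendering of the two ideas in the abstract (a convex combination of sense vectors reconstructing the word vector, with senses pulled towards their network neighbours) is given below; it uses plain gradient descent with a softmax parameterization rather than the paper's approximation, and all vectors are synthetic.

```python
# Toy sketch of the decomposition idea (an assumption-laden stand-in, not the paper's
# method): the observed word vector w is modelled as a convex combination of its sense
# vectors, while each sense vector is pulled towards the centroid of its neighbours in
# the semantic network. Gradient descent on a squared-Euclidean objective, with the
# mixture weights kept on the simplex through a softmax parameterization.
import numpy as np

rng = np.random.default_rng(3)
dim = 20
w = rng.normal(size=dim)                        # observed word vector, e.g. for "rock"
neighbor_centroids = rng.normal(size=(2, dim))  # one neighbour centroid per sense (toy)

senses = rng.normal(size=(2, dim))              # sense vectors to be learned
z = np.zeros(2)                                 # logits for the mixture weights
alpha, lr = 0.5, 0.05

for _ in range(500):
    p = np.exp(z) / np.exp(z).sum()                         # convex mixture weights
    residual = senses.T @ p - w                             # reconstruction error
    grad_s = 2 * np.outer(p, residual) + 2 * alpha * (senses - neighbor_centroids)
    grad_p = 2 * senses @ residual
    grad_z = p * (grad_p - p @ grad_p)                      # chain rule through softmax
    senses -= lr * grad_s
    z -= lr * grad_z

p = np.exp(z) / np.exp(z).sum()
print("mixture weights:", p, "residual norm:", np.linalg.norm(senses.T @ p - w))
```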

@inProceedings{borin-etal-2015-here-217351,
	title        = {Here be dragons? The perils and promises of inter-resource lexical-semantic mapping},
	abstract     = {Lexical-semantic knowledge sources are a stock item in the language technologist’s toolbox, having proved their practical worth in many and diverse natural language processing (NLP) applications. In linguistics, lexical semantics comes in many flavors, but in the NLP world, wordnets reign more or less supreme. There has been some promising work utilizing Roget-style thesauruses instead, but wider experimentation is hampered by the limited availability of such resources. The work presented here is a first step in the direction of creating a freely available Roget-style lexical resource for modern Swedish. Here, we explore methods for automatic disambiguation of inter-resource mappings with the longer-term goal of utilizing similar techniques for automatic enrichment of lexical-semantic resources.},
	booktitle    = {Linköping Electronic Conference Proceedings. Semantic resources and semantic annotation for Natural Language Processing and the Digital Humanities. Workshop at NODALIDA, May 11, 13–18, 2015, Vilnius},
	author       = {Borin, Lars and Nieto Piña, Luis and Johansson, Richard},
	year         = {2015},
	volume       = {112},
	ISBN         = {978-91-7519-049-5},
	pages        = {1--11},
}

@inProceedings{kageback-etal-2015-neural-217864,
	title        = {Neural context embeddings for automatic discovery of word senses},
	abstract     = {Word sense induction (WSI) is the problem of automatically building an inventory of senses for a set of target words using only a text corpus. We introduce a new method for embedding word instances and their context, for use in WSI. The method, Instance-context embedding (ICE), leverages neural word embeddings, and the correlation statistics they capture, to compute high quality embeddings of word contexts. In WSI, these context embeddings are clustered to find the word senses present in the text. ICE is based on a novel method for combining word embeddings using continuous Skip-gram, based on both semantic and temporal aspects of context words. ICE is evaluated both in a new system and in an extension to a previous system for WSI. In both cases, we surpass the previous state of the art on the WSI task of SemEval-2013, which highlights the generality of ICE. Our proposed system achieves a 33% relative improvement.},
	booktitle    = {Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing. Denver, United States},
	author       = {Kågebäck, Mikael and Johansson, Fredrik and Johansson, Richard and Dubhashi, Devdatt},
	year         = {2015},
	pages        = {25--32},
}
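
A simplified illustration of the clustering step described in the abstract (context embeddings clustered to induce senses) follows; the mean-of-context representation, the random vectors, and the cluster count are assumptions for illustration and not the ICE method itself.

```python
# Simplified illustration of the clustering step (not the ICE method itself): each
# occurrence of a target word is represented by the mean embedding of its context
# words, and the occurrences are clustered to induce senses. Embeddings are random
# stand-ins; the cluster count and helper names are assumptions.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(4)
dim = 50
word_vectors = {w: rng.normal(size=dim)
                for w in ["guitar", "band", "loud", "stone", "cliff", "granite"]}

instances = [["guitar", "band", "loud"],   # contexts of the target word "rock"
             ["stone", "cliff"],
             ["granite", "stone"],
             ["band", "guitar"]]

def context_embedding(context_words):
    """Mean of the context word vectors (ICE uses a learned weighted combination)."""
    return np.mean([word_vectors[w] for w in context_words], axis=0)

X = np.stack([context_embedding(ctx) for ctx in instances])
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
print(labels)   # induced sense label per occurrence
```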

@article{johansson-2014-automatic-201874,
	title        = {Automatic Expansion of the Swedish FrameNet Lexicon},
	abstract     = {We evaluate several lexicon-based and corpus-based methods to automatically induce new lexical units for the Swedish FrameNet, and we see that the best-performing setup uses a combination of both types of methods. A particular challenge for Swedish is the absence of a lexical resource such as WordNet; however, we show that the semantic network SALDO, which is organized according to lexicographical principles quite different from those of WordNet, is very useful for our purposes.},
	journal      = {Constructions and Frames},
	author       = {Johansson, Richard},
	year         = {2014},
	volume       = {6},
	number       = {1},
	pages        = {92--113},
}