
BibTeX

@article{tahmasebi-etal-2015-visions-212969,
	title        = {Visions and open challenges for a knowledge-based culturomics},
	abstract     = {The concept of culturomics was born out of the availability of massive amounts of textual data and the interest in making sense of cultural and language phenomena over time. Thus far, however, culturomics has only made use of, and shown the great potential of, statistical methods. In this paper, we present a vision for a knowledge-based culturomics that complements traditional culturomics. We discuss the possibilities and challenges of combining knowledge-based methods with statistical methods and address major challenges that arise due to the nature of the data: diversity of sources, changes in language over time, as well as temporal dynamics of information in general. We address all layers needed for knowledge-based culturomics, from natural language processing and relations to summaries and opinions.},
	journal      = {International Journal on Digital Libraries},
	author       = {Tahmasebi, Nina and Borin, Lars and Capannini, Gabriele and Dubhashi, Devdatt and Exner, Peter and Forsberg, Markus and Gossen, Gerhard and Johansson, Fredrik and Johansson, Richard and Kågebäck, Mikael and Mogren, Olof and Nugues, Pierre and Risse, Thomas},
	year         = {2015},
	volume       = {15},
	number       = {2-4},
	pages        = {169--187},
}

@inProceedings{ghanimifard-johansson-2015-enriching-222749,
	title        = {Enriching Word-sense Embeddings with Translational Context},
	abstract     = {Vector-space models derived from corpora are an effective way to learn a representation of word meaning directly from data, and these models have many uses in practical applications. A number of unsupervised approaches have been proposed to automatically learn representations of word senses directly from corpora, but since these methods use no information but the words themselves, they sometimes miss distinctions that could be possible to make if more information were available.

In this paper, we present a general framework that we call context enrichment that incorporates external information during the training of multi-sense vector-space models. Our approach is agnostic as to which external signal is used to enrich the context, but in this work we consider the use of translations as the source of enrichment. We evaluated the models trained using the translation-enriched context using several similarity benchmarks and a word analogy test set. In all our evaluations, the enriched model outperformed the purely word-based baseline soundly.},
	booktitle    = {Proceedings of Recent Advances in Natural Language Processing, Hissar, Bulgaria, 7–9 September 2015},
	editor       = {Galia Angelova and Kalina Bontcheva and Ruslan Mitkov},
	author       = {Ghanimifard, Mehdi and Johansson, Richard},
	year         = {2015},
	pages        = {208--215},
}
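
The abstract above describes "context enrichment": extending the training context of a multi-sense embedding model with an external signal, here translations taken from aligned text. The following Python sketch is illustrative only, not the paper's implementation; the function name, the window size, and the aligned_translations mapping are assumptions.

def enrich_context(tokens, position, aligned_translations, window=5):
    """Return the training context for the token at `position`: the usual
    window of surrounding words, extended with the token's translation(s)
    taken from a word-aligned parallel sentence."""
    left = tokens[max(0, position - window):position]
    right = tokens[position + 1:position + 1 + window]
    extra = aligned_translations.get(position, [])  # the translational signal
    return left + right + list(extra)

For instance, for the Swedish token "bok" aligned with English "book", the enriched context would contain both the surrounding Swedish words and "book", which could help a multi-sense model keep the 'book' and 'beech' senses apart during training.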

@inProceedings{adesam-etal-2015-multiwords-228833,
	title        = {Multiwords, Word Senses and Multiword Senses in the Eukalyptus Treebank of Written Swedish},
	abstract     = {Multiwords reside at the intersection of the lexicon and syntax, and in an annotation project they will affect both levels. In the Eukalyptus treebank of written Swedish, we treat multiwords formally as syntactic objects, which are assigned a lexical type and sense. With the help of a simple dichotomy, analyzed vs. unanalyzed multiwords, and the expressiveness of the syntactic annotation formalism employed, we are able to flexibly handle most multiword types and usages.},
	booktitle    = {Proceedings of the Fourteenth International Workshop on Treebanks and Linguistic Theories (TLT14), 11–12 December 2015, Warsaw, Poland},
	author       = {Adesam, Yvonne and Bouma, Gerlof and Johansson, Richard},
	year         = {2015},
	ISBN         = {978-83-63159-18-4},
	pages        = {3--12},
}

@inProceedings{nietopina-johansson-2015-simple-222611,
	title        = {A Simple and Efficient Method to Generate Word Sense Representations},
	abstract     = {Distributed representations of words have boosted the performance of many Natural Language Processing tasks. However, usually only one representation per word is obtained, not acknowledging the fact that some words have multiple meanings. This has a negative effect on the individual word representations and the language model as a whole. In this paper we present a simple model that enables recent techniques for building word vectors to represent distinct senses of polysemic words. In our assessment of this model we show that it is able to effectively discriminate between words’ senses and to do so in a computationally efficient manner.},
	booktitle    = {Proceedings of the International Conference Recent Advances in Natural Language Processing, Hissar, Bulgaria, 7–9 September 2015},
	editor       = {Galia Angelova and Kalina Bontcheva and Ruslan Mitkov},
	author       = {Nieto Piña, Luis and Johansson, Richard},
	year         = {2015},
	pages        = {465--472},
}
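
The abstract sketches a simple way to let word-vector learners keep separate vectors per sense. One common realization of that idea, shown below purely as an illustration (an assumption, not necessarily the paper's exact model), is to store several vectors per polysemic word and, at each skip-gram-style training step, update only the sense vector that best matches the current context.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def train_step(sense_vectors, context_vecs, lr=0.05):
    """sense_vectors: (k, d) array, one row per sense of the target word.
    context_vecs: (m, d) array of vectors for the observed context words."""
    ctx = context_vecs.mean(axis=0)
    best = int(np.argmax(sense_vectors @ ctx))   # hard-select the closest sense
    # positive skip-gram-style gradient step for the selected sense only
    grad = (1.0 - sigmoid(sense_vectors[best] @ ctx)) * ctx
    sense_vectors[best] += lr * grad
    return best

The hard selection keeps the cost per update close to that of ordinary skip-gram, which is one way to read the efficiency claim in the abstract.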

@inProceedings{adesam-etal-2015-defining-217815,
	title        = {Defining the Eukalyptus forest – the Koala treebank of Swedish},
	abstract     = {This paper details the design of the lexical and syntactic layers of a new annotated corpus of Swedish contemporary texts. In order to make the corpus adaptable into a variety of representations, the annotation is of a hybrid type with head-marked constituents and function-labeled edges, and with a rich annotation of non-local dependencies. The source material has been taken from public sources, to allow the resulting corpus to be made freely available.},
	booktitle    = {Proceedings of the 20th Nordic Conference of Computational Linguistics, NODALIDA 2015, May 11-13, 2015, Vilnius, Lithuania},
	editor       = {Beáta Megyesi},
	author       = {Adesam, Yvonne and Bouma, Gerlof and Johansson, Richard},
	year         = {2015},
	ISBN         = {978-91-7519-098-3},
	pages        = {1--9},
}

@inProceedings{johansson-nietopina-2015-embedding-217863,
	title        = {Embedding a Semantic Network in a Word Space},
	abstract     = {We present a framework for using continuous-space vector representations of word meaning to derive new vectors representing the meaning of senses listed in a semantic network. It is a post-processing approach that can be applied to several types of word vector representations. It uses two ideas: first, that vectors for polysemous words can be decomposed into a convex combination of sense vectors; secondly, that the vector for a sense is kept similar to those of its neighbors in the network. This leads to a constrained optimization problem, and we present an approximation for the case when the distance function is the squared Euclidean.

We applied this algorithm on a Swedish semantic network, and we evaluate the quality of the resulting sense representations extrinsically by showing that they give large improvements when used in a classifier that creates lexical units for FrameNet frames.},
	booktitle    = {Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Denver, United States, May 31 – June 5, 2015},
	author       = {Johansson, Richard and Nieto Piña, Luis},
	year         = {2015},
	ISBN         = {978-1-941643-49-5},
	pages        = {1428--1433},
}
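
The abstract above describes a concrete setup: a polysemous word's vector is modelled as a convex combination of its sense vectors, each sense vector is kept close to its neighbors in the semantic network, and the whole thing becomes a constrained optimization problem with a squared-Euclidean distance. The rough numpy sketch below only illustrates that setup and is not the authors' algorithm; the alternating update, the alpha parameter, and the function name are assumptions.

import numpy as np

def embed_senses(word_vec, sense_neighbours, n_iter=20, alpha=0.5):
    """word_vec: (d,) vector of a polysemous word.
    sense_neighbours: one (m_i, d) array per sense, holding the vectors of
    that sense's neighbours in the semantic network."""
    centroids = np.stack([nv.mean(axis=0) for nv in sense_neighbours])
    senses = centroids.copy()              # start each sense at its neighbour centroid
    k = len(sense_neighbours)
    weights = np.full(k, 1.0 / k)          # convex-combination weights

    for _ in range(n_iter):
        # pull each sense toward its neighbours while nudging the weighted
        # mix of senses toward the observed word vector (squared-Euclidean flavour)
        mix = weights @ senses
        senses = alpha * centroids + (1 - alpha) * (senses + np.outer(weights, word_vec - mix))
        # re-fit the convex weights: least squares followed by projection
        w, *_ = np.linalg.lstsq(senses.T, word_vec, rcond=None)
        w = np.clip(w, 0.0, None)
        weights = w / (w.sum() + 1e-12)
    return senses, weights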

@inProceedings{kageback-etal-2015-neural-217864,
	title        = {Neural context embeddings for automatic discovery of word senses},
	abstract     = {Word sense induction (WSI) is the problem of automatically building an inventory of senses for a set of target words using only a text corpus. We introduce a new method for embedding word instances and their context, for use in WSI. The method, Instance-context embedding (ICE), leverages neural word embeddings, and the correlation statistics they capture, to compute high quality embeddings of word contexts. In WSI, these context embeddings are clustered to find the word senses present in the text. ICE is based on a novel method for combining word embeddings using continuous Skip-gram, based on both semantic and temporal aspects of context words. ICE is evaluated both in a new system, and in an extension to a previous system for WSI. In both cases, we surpass the previous state of the art on the WSI task of SemEval-2013, which highlights the generality of ICE. Our proposed system achieves a 33% relative improvement.},
	booktitle    = {Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing. Denver, United States},
	author       = {Kågebäck, Mikael and Johansson, Fredrik and Johansson, Richard and Dubhashi, Devdatt},
	year         = {2015},
	pages        = {25--32},
}
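
The pipeline this abstract describes, embedding each instance of a target word via its context words' skip-gram vectors and then clustering the instance embeddings to induce senses, can be illustrated with a small sketch. This is not the ICE method itself (ICE weights context words using semantic and temporal aspects; the sketch just averages), and the function names and fixed number of clusters are assumptions.

import numpy as np
from sklearn.cluster import KMeans

def instance_embedding(context_words, word_vectors):
    """Average the available context-word vectors into one instance embedding."""
    vecs = [word_vectors[w] for w in context_words if w in word_vectors]
    return np.mean(vecs, axis=0) if vecs else None

def induce_senses(instances, word_vectors, n_senses=3):
    """instances: one list of context words per occurrence of the target word.
    Returns a cluster id (induced sense) for each usable instance."""
    embs = [e for e in (instance_embedding(c, word_vectors) for c in instances)
            if e is not None]
    return KMeans(n_clusters=n_senses, n_init=10).fit_predict(np.stack(embs))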

@inProceedings{borin-etal-2015-here-217351,
	title        = {Here be dragons? The perils and promises of inter-resource lexical-semantic mapping},
	abstract     = {Lexical-semantic knowledge sources are a stock item in the language technologist’s toolbox, having proved their practical worth in many and diverse natural language processing (NLP) applications. In linguistics, lexical semantics comes in many flavors, but in the NLP world, wordnets reign more or less supreme. There has been some promising work utilizing Roget-style thesauruses instead, but wider experimentation is hampered by the limited availability of such resources. The work presented here is a first step in the direction of creating a freely available Roget-style lexical resource for modern Swedish. Here, we explore methods for automatic disambiguation of inter-resource mappings with the longer-term goal of utilizing similar techniques for automatic enrichment of lexical-semantic resources.},
	booktitle    = {Linköping Electronic Conference Proceedings. Semantic resources and semantic annotation for Natural Language Processing and the Digital Humanities. Workshop at NODALIDA, May 11, 13-18 2015, Vilnius},
	author       = {Borin, Lars and Nieto Piña, Luis and Johansson, Richard},
	year         = {2015},
	volume       = {112},
	ISBN         = {978-91-7519-049-5},
	pages        = {1--11},
}

@inProceedings{johansson-nietopina-2015-combining-216865,
	title        = {Combining Relational and Distributional Knowledge for Word Sense Disambiguation},
	abstract     = {We present a new approach to word sense disambiguation derived from recent ideas in distributional semantics. The input to the algorithm is a large unlabeled corpus and a graph describing how senses are related; no sense-annotated corpus is needed. The fundamental idea is to embed meaning representations of senses in the same continuous-valued vector space as the representations of words. In this way, the knowledge encoded in the lexical resource is combined with the information derived by the distributional methods. Once this step has been carried out, the sense representations can be plugged back into e.g. the skip-gram model, which allows us to compute scores for the different possible senses of a word in a given context.

We evaluated the new word sense disambiguation system on two Swedish test sets annotated with senses defined by the SALDO lexical resource. In both evaluations, our system soundly outperformed random and first-sense baselines. Its accuracy was slightly above that of a well-known graph-based system, while being computationally much more efficient.},
	booktitle    = {Proceedings of the 20th Nordic Conference of Computational Linguistics, May 12-13, Vilnius, Lithuania. Linköping Electronic Conference Proceedings 109, Linköping University Electronic Press},
	author       = {Johansson, Richard and Nieto Piña, Luis},
	year         = {2015},
	ISBN         = {978-91-7519-098-3},
	pages        = {69--78},
}
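
The last abstract describes the scoring step: once sense vectors live in the same space as word vectors, a skip-gram-style score over the context words can rank the candidate senses of an ambiguous word. The sketch below illustrates only that step (the sense vectors themselves would come from a method like the semantic-network embedding above); the summed log-sigmoid dot products and all names are assumptions rather than the paper's exact formulation.

import numpy as np

def log_sigmoid(x):
    return -np.logaddexp(0.0, -x)

def disambiguate(sense_vectors, context_words, output_vectors):
    """sense_vectors: dict sense_id -> (d,) vector from the lexical resource.
    output_vectors: dict word -> (d,) skip-gram output ("context") vector.
    Returns the best-scoring sense and the full score table."""
    scores = {}
    for sense, v in sense_vectors.items():
        ctx = [output_vectors[w] for w in context_words if w in output_vectors]
        scores[sense] = sum(log_sigmoid(v @ c) for c in ctx)
    return max(scores, key=scores.get), scores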