% Publication list (2013). Recovered from a web-scraped export; entry wrappers were reconstructed.


	title        = {Automatic Selection of Suitable Sentences  for Language Learning Exercises},
	abstract     = {In this study we investigated second and foreign language (L2) sentence 
readability, an area little explored so far in the case of several languages, including 
Swedish. The outcome of our research consists of two methods for sentence selection 
from native language corpora based on Natural Language Processing (NLP) and 
machine learning (ML) techniques. The two approaches have been made available 
online within Lärka, an Intelligent CALL (ICALL) platform offering activities 
for language learners and students of linguistics. Such an automatic selection 
of suitable sentences can be valuable for L2 teachers during the creation of new 
teaching materials, for L2 students who look for additional self-study exercises as 
well as for lexicographers in search of example sentences to illustrate the meaning 
of a vocabulary item. Members from all these potential user groups evaluated our 
methods and found the majority of the sentences selected suitable for L2 learning 
	booktitle    = {20 Years of EUROCALL: Learning from the Past, Looking to the Future. 2013 EUROCALL Conference, 11th  to 14th September 2013 Évora, Portugal, Proceedings.},
	author       = {Pilán, Ildikó and Volodina, Elena and Johansson, Richard},
	year         = {2013},
	ISBN         = {978-1-908416-12-4},
	pages        = {218--225},

	title        = {Mining semantics for culturomics: towards a knowledge-based approach},
	abstract     = {The massive amounts of text data made available through the Google Books digitization project have inspired a new field of big-data textual research. Named culturomics, this field has attracted the attention of a growing number of scholars over recent years. However, initial studies based on these data have been criticized for not referring to relevant work in linguistics and language technology. This paper provides some ideas, thoughts and first steps towards a new culturomics initiative, based this time on Swedish data, which pursues a more knowledge-based approach than previous work in this emerging field. The amount of new Swedish text produced daily and older texts being digitized in cultural heritage projects grows at an accelerating rate. These volumes of text being available in digital form have grown far beyond the capacity of human readers, leaving automated semantic processing of the texts as the only realistic option for accessing and using the information contained in them. The aim of our recently initiated research program is to advance the state of the art in language technology resources and methods for semantic processing of Big Swedish text and focus on the theoretical and methodological advancement of the state of the art in extracting and correlating information from large volumes of Swedish text using a combination of knowledge-based and statistical methods.},
	booktitle    = {2013 ACM International Workshop on Mining Unstructured Big Data Using Natural Language Processing, UnstructureNLP 2013, Held at 22nd ACM International Conference on Information and Knowledge Management, CIKM 2013; San Francisco, CA; United States; 28 October 2013 through 28 October 2013},
	author       = {Borin, Lars and Dubhashi, Devdatt and Forsberg, Markus and Johansson, Richard and Kokkinakis, Dimitrios and Nugues, Pierre},
	year         = {2013},
	ISBN         = {978-1-4503-2415-1},
	pages        = {3--10},

	title        = {Mining Fine-grained Opinion Expressions with Shallow Parsing},
	abstract     = {Opinion analysis deals with public opinions and trends, but subjective language
is highly ambiguous. In this paper, we
follow a simple data-driven technique to
learn fine-grained opinions. We select
an intersection set of Wall Street Journal documents that is included both in the
Penn Discourse Tree Bank (PDTB) and in
the Multi-Perspective Question Answering (MPQA) corpus. This is done in order to explore the usefulness of discourse-level structure to facilitate the extraction
of fine-grained opinion expressions. Here
we perform shallow parsing of MPQA expressions with connective based discourse
structure, and then also with Named Entities (NE) and some syntax features using
conditional random fields; the latter feature set is basically a collection of NEs and
a bundle of features that is proved to be
useful in a shallow discourse parsing task.
We found that both of the feature-sets are
useful to improve our baseline at different
levels of this fine-grained opinion expression mining task.},
	booktitle    = {Proceedings of the International Conference Recent Advances in Natural Language Processing},
	author       = {Ghosh, Sucheta and Tonelli, Sara and Johansson, Richard},
	year         = {2013},
	pages        = {302--310},

	title        = {Relational Features in Fine-grained Opinion Analysis},
	abstract     = {Fine-grained opinion analysis often makes use of linguistic features but typically does not
take the interaction between opinions into account. This article describes a set of experiments that demonstrate that relational features, mainly derived from dependency-syntactic and semantic role structures, can significantly improve the performance of automatic systems for a number of fine-grained opinion analysis tasks: marking up opinion expressions, finding opinion holders, and determining the polarities of opinion expressions. These features make it possible to model the way opinions expressed in natural-language discourse interact in a sentence over arbitrary distances. The use of relations requires us to consider multiple opinions simultaneously, which makes exact inference intractable. However, a reranker can be used as a sufficiently accurate and efficient approximation.

A number of feature sets and machine learning approaches for the rerankers are evaluated. For the task of opinion expression extraction, the best model shows a 10-point absolute improvement in soft recall on the MPQA corpus over a conventional sequence labeler based on local contextual features, while precision decreases only slightly. Significant improvements are also seen for the extended tasks where holders and polarities are considered: 10 and 7 points in recall, respectively. In addition, the systems outperform previously published results for unlabeled (6 F-measure points) and polarity-labeled (10–15 points) opinion expression extraction. Finally, as an extrinsic evaluation, the extracted MPQA-style opinion expressions are used in practical opinion mining tasks. In all scenarios considered, the machine learning features derived from the opinion expressions lead to statistically significant improvement.},
	journal      = {Computational Linguistics},
	author       = {Johansson, Richard and Moschitti, Alessandro},
	year         = {2013},
	volume       = {39},
	number       = {3},
	pages        = {473--509},

	title        = {Automatic Service Categorisation through Machine Learning in Emergent Middleware},
	booktitle    = {Lecture notes in computer sciences},
	author       = {Bennaceur, Amel and Johansson, Richard and Moschitti, Alessandro and Sykes, Daniel and Issarny, Valérie},
	year         = {2013},
	volume       = {7542},
	pages        = {133--149},

	title        = {Training Parsers on Incompatible Treebanks},
	abstract     = {We consider the problem of training a statistical parser in the
situation when there are multiple treebanks available, and these
treebanks are annotated according to different linguistic

To address this problem, we present two simple adaptation methods:
the first method is based on the idea of using a shared feature
representation when parsing multiple treebanks, and the second method
on guided parsing where the output of one parser provides features
for a second one.

To evaluate and analyze the adaptation methods, we train parsers
on treebank pairs in four languages: German, Swedish, Italian, and English.
We see significant improvements for all eight treebanks when training
on the full training sets. However, the clearest benefits are seen when we
consider smaller training sets. Our experiments were carried out with
unlabeled dependency parsers, but the methods can easily be 
generalized to other feature-based parsers.},
	booktitle    = {Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
	author       = {Johansson, Richard},
	year         = {2013},
	pages        = {127--137},

	title        = {Learning to Rank from Structures in Hierarchical Text Classification},
	abstract     = {In this paper, we model learning to rank algorithms based on structural dependencies in hierarchical multi-label text categorization (TC). Our method uses the classification probability of the binary classifiers of a standard top-down approach to generate k-best hypotheses. The latter are generated according to their global probability while at the same time satisfy the structural constraints between father and children nodes. The rank is then refined using Support Vector Machines and tree kernels applied to a structural representation of hypotheses, i.e., a hierarchy tree in which the outcome of binary one-vs-all classifiers is directly marked in its nodes. Our extensive experiments on the whole Reuters Corpus Volume 1 show that our models significantly improve over the state of the art in TC, thanks to the use of structural dependecies.},
	booktitle    = {Advances in Information Retrieval; 35th European Conference on IR Research, ECIR 2013, Moscow, Russia, March 24-27, 2013; P. Serdyukov et al. (ed)},
	author       = {Ju, Qi and Moschitti, Alessandro and Johansson, Richard},
	year         = {2013},
	volume       = {Lecture Notes in Computer Science 7814},
	ISBN         = {978-3-642-36972-8},
	pages        = {183--194},