BibTeX

@misc{cousse-etal-2023-inget-324690,
	title        = {Inget stöd i forskningen för att de/dem slås ut},
	author       = {Coussé, Evie and Adesam, Yvonne and Berdicevskis, Aleksandrs},
	year         = {2023},
	number       = {2023-03-20},
}

@incollection{tahmasebi-dubossarsky-2023-computational-325543,
	title        = {Computational modeling of semantic change},
	abstract     = {In this chapter we provide an overview of computational modeling for semantic change using large and semi-large textual corpora. We aim to provide a key for the interpretation of relevant methods and evaluation techniques, and also provide insights into important aspects of the computational study of semantic change. We discuss the pros and cons of different classes of models with respect to the properties of the data from which one wishes to model semantic change, and which avenues are available to evaluate the results. This chapter is forthcoming as the book has not yet been published. },
	booktitle    = {Routledge Handbook of Historical Linguistics, 2nd edition},
	author       = {Tahmasebi, Nina and Dubossarsky, Haim},
	year         = {2023},
	publisher    = {Routledge},
}

@article{berdicevskis-etal-2024-drop-326112,
	title        = {To drop or not to drop? Predicting the omission of the infinitival marker in a Swedish future construction},
	abstract     = {We investigate the optional omission of the infinitival marker in a Swedish future tense construction. During the last two decades the frequency of omission has been rapidly increasing, and this process has received considerable attention in the literature. We test whether the knowledge which has been accumulated can yield accurate predictions of language variation and change. We extracted all occurrences of the construction from a very large collection of corpora. The dataset was automatically annotated with language-internal predictors which have previously been shown or hypothesized to affect the variation. We trained several models in order to make two kinds of predictions: whether the marker will be omitted in a specific utterance and how large the proportion of omissions will be for a given time period. For most of the approaches we tried, we were not able to achieve a better-than-baseline performance. The only exception was predicting the proportion of omissions using autoregressive integrated moving average models for one-step-ahead forecast, and in this case time was the only predictor that mattered. Our data suggest that most of the language-internal predictors do have some effect on the variation, but the effect is not strong enough to yield reliable predictions.},
	journal      = {Corpus Linguistics and Linguistic Theory},
	author       = {Berdicevskis, Aleksandrs and Coussé, Evie and Koplenig, Alexander and Adesam, Yvonne},
	year         = {2024},
	volume       = {20},
	number       = {1},
	pages        = {219--261},
}

@article{ehret-etal-2023-measuring-326113,
	title        = {Measuring language complexity: challenges and opportunities},
	journal      = {Linguistics Vanguard},
	author       = {Ehret, Katharina and Berdicevskis, Aleksandrs and Bentz, Christian and Blumenthal-Dramé, Alice},
	year         = {2023},
	volume       = {9},
	pages        = {1--8},
}

@inproceedings{berdicevskis-erbro-2023-tomato-326355,
	title        = {You say tomato, I say the same: A large-scale study of linguistic accommodation in online communities},
	booktitle    = {Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)},
	author       = {Berdicevskis, Aleksandrs and Erbro, Viktor},
	year         = {2023},
	isbn         = {978-99-1621-999-7},
}