BibTeX

@inproceedings{marvinimperial-etal-2025-universalcefr-356501,
	title        = {UniversalCEFR: Enabling Open Multilingual Research on Language Proficiency Assessment},
	abstract     = {We introduce UniversalCEFR, a large-scale multilingual multidimensional dataset of texts annotated according to the CEFR (Common European Framework of Reference) scale in 13 languages. To enable open research in both automated readability and language proficiency assessment, UniversalCEFR comprises 505,807 CEFR-labeled texts curated from educational and learner-oriented resources, standardized into a unified data format to support consistent processing, analysis, and modeling across tasks and languages. To demonstrate its utility, we conduct benchmark experiments using three modelling paradigms: a) linguistic feature-based classification, b) fine-tuning pre-trained LLMs, and c) descriptor-based prompting of instruction-tuned LLMs. Our results further support using linguistic features and fine-tuning pretrained models in multilingual CEFR level assessment. Overall, UniversalCEFR aims to establish best practices in data distribution in language proficiency research by standardising dataset formats and promoting their accessibility to the global research community.},
	booktitle    = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
	author       = {Imperial, Joseph Marvin and Barayan, Abdullah and Stodden, Regina and Wilkens, Rodrigo and Muñoz Sánchez, Ricardo and Gao, Lingyun and Torgbi, Melissa and Knight, Dawn and Forey, Gail and Jablonkai, Reka R. and Kochmar, Ekaterina and Reynolds, Robert Joshua and Ribeiro, Eugénio and Saggion, Horacio and Volodina, Elena and Vajjala, Sowmya and François, Thomas and Alva-Manchego, Fernando and Tayyar Madabushi, Harish},
	year         = {2025},
	publisher    = {Association for Computational Linguistics},
	pages        = {9714–9766},
}