BibTeX

@inProceedings{daudaravicius-etal-2016-report-248143,
	title        = {A Report on the Automatic Evaluation of Scientific Writing Shared Task},
	abstract     = {The Automated Evaluation of Scientific Writing, or AESW, is the task of identifying sentences in need of correction to ensure their appropriateness in scientific prose. The data set comes from a professional editing company, VTeX, with two aligned versions of the same text – before and after editing – and covers a variety of textual infelicities that proofreaders have edited. While previous shared tasks focused solely on grammatical errors (Dale and Kilgarriff, 2011; Dale et al., 2012; Ng et al., 2013; Ng et al., 2014), this time edits cover other types of linguistic misfits as well, including those that almost certainly could be interpreted as style issues and similar “matters of opinion”. The latter arise because of different language editing traditions, experience, and the absence of uniform agreement on what “good” scientific language should look like. Initiating this task, we expected the participating teams to help identify the characteristics of “good” scientific language, and to help create a consensus on which language improvements are acceptable (or necessary). Six participating teams took on the challenge.},
	booktitle    = {Proceedings of the 11th Workshop on Innovative Use of NLP for Building Educational Applications, June 16, 2016, San Diego, CA, USA},
	author       = {Daudaravicius, Vidas and Banchs, Rafael E. and Volodina, Elena and Napoles, Courtney},
	year         = {2016},
	ISBN         = {978-1-941643-83-9},
}