@inproceedings{periti-etal-2024-trotr-343017,
  title     = {TRoTR: A Framework for Evaluating the Re-contextualization of Text Reuse},
  abstract  = {Current approaches for detecting text reuse do not focus on recontextualization, i.e., how the new context(s) of a reused text differs from its original context(s). In this paper, we propose a novel framework called TRoTR that relies on the notion of topic relatedness for evaluating the diachronic change of context in which text is reused. TRoTR includes two NLP tasks: TRiC and TRaC. TRiC is designed to evaluate the topic relatedness between a pair of recontextualizations. TRaC is designed to evaluate the overall topic variation within a set of recontextualizations. We also provide a curated TRoTR benchmark of biblical text reuse, human-annotated with topic relatedness. The benchmark exhibits an inter-annotator agreement of .811. We evaluate multiple, established SBERT models on the TRoTR tasks and find that they exhibit greater sensitivity to textual similarity than topic relatedness. Our experiments show that fine-tuning these models can mitigate such a kind of sensitivity.},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  author    = {Periti, Francesco and Cassotti, Pierluigi and Montanelli, Stefano and Tahmasebi, Nina and Schlechtweg, Dominik},
  year      = {2024},
  publisher = {Association for Computational Linguistics},
  address   = {Miami, Florida, USA},
  pages     = {13972--13990},
}