@inproceedings{pylypenko-etal-2021-comparing,
    title = {Comparing Feature-Engineering and Feature-Learning Approaches for Multilingual Translationese Classification},
    author = {Daria Pylypenko and Kwabena Amponsah-Kaakyire and Koel Dutta Chowdhury and Josef van Genabith and Cristina Espa{\~n}a-Bonet},
    url = {https://aclanthology.org/2021.emnlp-main.676/},
    doi = {10.18653/v1/2021.emnlp-main.676},
    year = {2021},
    date = {2021},
    booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
    pages = {8596--8611},
    publisher = {Association for Computational Linguistics},
    address = {Online and Punta Cana, Dominican Republic},
    abstract = {Traditional hand-crafted linguistically-informed features have often been used for distinguishing between translated and original non-translated texts. By contrast, to date, neural architectures without manual feature engineering have been less explored for this task. In this work, we (i) compare the traditional feature-engineering-based approach to the feature-learning-based one and (ii) analyse the neural architectures in order to investigate how well the hand-crafted features explain the variance in the neural models' predictions. We use pre-trained neural word embeddings, as well as several end-to-end neural architectures in both monolingual and multilingual settings and compare them to feature-engineering-based SVM classifiers. We show that (i) neural architectures outperform other approaches by more than 20 accuracy points, with the BERT-based model performing the best in both the monolingual and multilingual settings; (ii) while many individual hand-crafted translationese features correlate with neural model predictions, feature importance analysis shows that the most important features for neural and classical architectures differ; and (iii) our multilingual experiments provide empirical evidence for translationese universals across languages.},
    pubstate = {published}
}