@inproceedings{espana-bonet-barron-cedeno-2022-undesired,
  title     = {The (Undesired) Attenuation of Human Biases by Multilinguality},
  author    = {Espa{\~n}a-Bonet, Cristina and Barr{\'o}n-Cede{\~n}o, Alberto},
  booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
  year      = {2022},
  month     = dec,
  address   = {Online and Abu Dhabi, United Arab Emirates},
  publisher = {Association for Computational Linguistics},
  pages     = {2056--2077},
  url       = {https://aclanthology.org/2022.emnlp-main.133},
  doi       = {10.18653/v1/2022.emnlp-main.133},
  abstract  = {Some human preferences are universal. The odor of vanilla is perceived as pleasant all around the world. We expect neural models trained on human texts to exhibit these kind of preferences, i.e. biases, but we show that this is not always the case. We explore 16 static and contextual embedding models in 9 languages and, when possible, compare them under similar training conditions. We introduce and release CA-WEAT, multilingual cultural aware tests to quantify biases, and compare them to previous English-centric tests. Our experiments confirm that monolingual static embeddings do exhibit human biases, but values differ across languages, being far from universal. Biases are less evident in contextual models, to the point that the original human association might be reversed. Multilinguality proves to be another variable that attenuates and even reverses the effect of the bias, specially in contextual multilingual models. In order to explain this variance among models and languages, we examine the effect of asymmetries in the training corpus, departures from isomorphism in multilingual embedding spaces and discrepancies in the testing measures between languages.},
}