Publications

Hong, Xudong; Sayeed, Asad; Demberg, Vera

Learning Distributed Event Representations with a Multi-Task Approach Inproceedings

Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics, Association for Computational Linguistics, pp. 11-21, New Orleans, USA, 2018.

Human world knowledge contains information about prototypical events and their participants and locations. In this paper, we train the first models using multi-task learning that can both predict missing event participants and also perform semantic role classification based on semantic plausibility. Our best-performing model is an improvement over the previous state-of-the-art on thematic fit modelling tasks. The event embeddings learned by the model can additionally be used effectively in an event similarity task, also outperforming the state-of-the-art.

@inproceedings{Hong2018,
title = {Learning Distributed Event Representations with a Multi-Task Approach},
author = {Xudong Hong and Asad Sayeed and Vera Demberg},
url = {https://aclanthology.org/S18-2002},
doi = {https://doi.org/10.18653/v1/S18-2002},
year = {2018},
date = {2018},
booktitle = {Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics},
pages = {11-21},
publisher = {Association for Computational Linguistics},
address = {New Orleans, USA},
abstract = {Human world knowledge contains information about prototypical events and their participants and locations. In this paper, we train the first models using multi-task learning that can both predict missing event participants and also perform semantic role classification based on semantic plausibility. Our best-performing model is an improvement over the previous state-of-the-art on thematic fit modelling tasks. The event embeddings learned by the model can additionally be used effectively in an event similarity task, also outperforming the state-of-the-art.},
pubstate = {published},
type = {inproceedings}
}

Project:   A3

Ostermann, Simon; Seitz, Hannah; Thater, Stefan; Pinkal, Manfred

Mapping Text to Scripts: An Entailment Study Inproceedings

Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, 2018.

Commonsense knowledge as provided by scripts is crucially relevant for text understanding systems, providing a basis for commonsense inference. This paper considers a relevant subtask of script-based text understanding, the task of mapping event mentions in a text to script events. We focus on script representations where events are associated with paraphrase sets, i.e. sets of crowdsourced event descriptions. We provide a detailed annotation of event mention/description pairs with textual entailment types. We demonstrate that representing events in terms of paraphrase sets can massively improve the performance of text-to-script mapping systems. However, for a residual substantial fraction of cases, deeper inference is still required.

@inproceedings{MCScriptb,
title = {Mapping Text to Scripts: An Entailment Study},
author = {Simon Ostermann and Hannah Seitz and Stefan Thater and Manfred Pinkal},
url = {https://www.semanticscholar.org/paper/Mapping-Texts-to-Scripts%3A-An-Entailment-Study-Ostermann-Seitz/7970ec54afb3d78d9f061a38db27d0bd19e215d5},
year = {2018},
date = {2018},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)},
address = {Miyazaki, Japan},
abstract = {Commonsense knowledge as provided by scripts is crucially relevant for text understanding systems, providing a basis for commonsense inference. This paper considers a relevant subtask of script-based text understanding, the task of mapping event mentions in a text to script events. We focus on script representations where events are associated with paraphrase sets, i.e. sets of crowdsourced event descriptions. We provide a detailed annotation of event mention/description pairs with textual entailment types. We demonstrate that representing events in terms of paraphrase sets can massively improve the performance of text-to-script mapping systems. However, for a residual substantial fraction of cases, deeper inference is still required.},
pubstate = {published},
type = {inproceedings}
}

Project:   A3

Roth, Michael; Thater, Stefan; Ostermann, Simon; Modi, Ashutosh; Pinkal, Manfred

MCScript: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge Inproceedings

Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018), European Language Resources Association (ELRA), Miyazaki, Japan, 2018.

We introduce a large dataset of narrative texts and questions about these texts, intended to be used in a machine comprehension task that requires reasoning using commonsense knowledge. Our dataset complements similar datasets in that we focus on stories about everyday activities, such as going to the movies or working in the garden, and that the questions require commonsense knowledge, or more specifically, script knowledge, to be answered. We show that our mode of data collection via crowdsourcing results in a substantial amount of such inference questions. The dataset forms the basis of a shared task on commonsense and script knowledge organized at SemEval 2018 and provides challenging test cases for the broader natural language understanding community

@inproceedings{MCScript,
title = {MCScript: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge},
author = {Michael Roth and Stefan Thater and Simon Ostermann and Ashutosh Modi and Manfred Pinkal},
url = {https://aclanthology.org/L18-1564},
year = {2018},
date = {2018},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC 2018)},
publisher = {European Language Resources Association (ELRA)},
address = {Miyazaki, Japan},
abstract = {We introduce a large dataset of narrative texts and questions about these texts, intended to be used in a machine comprehension task that requires reasoning using commonsense knowledge. Our dataset complements similar datasets in that we focus on stories about everyday activities, such as going to the movies or working in the garden, and that the questions require commonsense knowledge, or more specifically, script knowledge, to be answered. We show that our mode of data collection via crowdsourcing results in a substantial amount of such inference questions. The dataset forms the basis of a shared task on commonsense and script knowledge organized at SemEval 2018 and provides challenging test cases for the broader natural language understanding community},
pubstate = {published},
type = {inproceedings}
}

Project:   A3

Roth, Michael; Thater, Stefan; Ostermann, Simon; Modi, Ashutosh; Pinkal, Manfred

SemEval-2018 Task 11: Machine Comprehension using Commonsense Knowledge Inproceedings

Proceedings of the 12th International Workshop on Semantic Evaluation, Association for Computational Linguistics, pp. 747-757, New Orleans, Louisiana, 2018.

This report summarizes the results of the SemEval 2018 task on machine comprehension using commonsense knowledge. For this machine comprehension task, we created a new corpus, MCScript. It contains a high number of questions that require commonsense knowledge for finding the correct answer. 11 teams from 4 different countries participated in this shared task, most of them used neural approaches. The best performing system achieves an accuracy of 83.95%, outperforming the baselines by a large margin, but still far from the human upper bound, which was found to be at 98%.

@inproceedings{SemEval2018Task11,
title = {SemEval-2018 Task 11: Machine Comprehension using Commonsense Knowledge},
author = {Michael Roth and Stefan Thater and Simon Ostermann and Ashutosh Modi and Manfred Pinkal},
url = {https://aclanthology.org/S18-1119},
doi = {https://doi.org/10.18653/v1/S18-1119},
year = {2018},
date = {2018},
booktitle = {Proceedings of the 12th International Workshop on Semantic Evaluation},
pages = {747-757},
publisher = {Association for Computational Linguistics},
address = {New Orleans, Louisiana},
abstract = {This report summarizes the results of the SemEval 2018 task on machine comprehension using commonsense knowledge. For this machine comprehension task, we created a new corpus, MCScript. It contains a high number of questions that require commonsense knowledge for finding the correct answer. 11 teams from 4 different countries participated in this shared task, most of them used neural approaches. The best performing system achieves an accuracy of 83.95%, outperforming the baselines by a large margin, but still far from the human upper bound, which was found to be at 98%.},
pubstate = {published},
type = {inproceedings}
}

Project:   A3

Delogu, Francesca; Drenhaus, Heiner; Crocker, Matthew W.

On the predictability of event-boundaries in discourse: an ERP investigation Journal Article

Memory and Cognition, 46, pp. 315-325, 2018.

When reading a text describing an everyday activity, comprehenders build a model of the situation described that includes prior knowledge of the entities, locations, and sequences of actions that typically occur within the event. Previous work has demonstrated that such knowledge guides the processing of incoming information by making event boundaries more or less expected.

In the present ERP study, we investigated whether comprehenders’ expectations about event boundaries are influenced by how elaborately common events are described in the context. Participants read short stories in which a common activity (e.g., washing the dishes) was described either in brief or in an elaborate manner. The final sentence contained a target word referring to a more predictable action marking a fine event boundary (e.g., drying) or a less predictable action, marking a coarse event boundary (e.g., jogging). The results revealed a larger N400 effect for coarse event boundaries compared to fine event boundaries, but no interaction with description length. Between 600 and 1000 ms, however, elaborate contexts elicited a larger frontal positivity compared to brief contexts.

This effect was largely driven by less predictable targets, marking coarse event boundaries. We interpret the P600 effect as indexing the updating of the situation model at event boundaries, consistent with Event Segmentation Theory (EST). The updating process is more demanding with coarse event boundaries, which presumably require the construction of a new situation model.

@article{Delogu2018,
title = {On the predictability of event-boundaries in discourse: an ERP investigation},
author = {Francesca Delogu and Heiner Drenhaus and Matthew W. Crocker},
url = {https://www.researchgate.net/publication/321175597_On_the_predictability_of_event_boundaries_in_discourse_An_ERP_investigation},
doi = {https://doi.org/10.3758/s13421-017-0766-4},
year = {2018},
date = {2018},
journal = {Memory and Cognition},
pages = {315-325},
volume = {46},
number = {2},
abstract = {When reading a text describing an everyday activity, comprehenders build a model of the situation described that includes prior knowledge of the entities, locations, and sequences of actions that typically occur within the event. Previous work has demonstrated that such knowledge guides the processing of incoming information by making event boundaries more or less expected. In the present ERP study, we investigated whether comprehenders’ expectations about event boundaries are influenced by how elaborately common events are described in the context. Participants read short stories in which a common activity (e.g., washing the dishes) was described either in brief or in an elaborate manner. The final sentence contained a target word referring to a more predictable action marking a fine event boundary (e.g., drying) or a less predictable action, marking a coarse event boundary (e.g., jogging). The results revealed a larger N400 effect for coarse event boundaries compared to fine event boundaries, but no interaction with description length. Between 600 and 1000 ms, however, elaborate contexts elicited a larger frontal positivity compared to brief contexts. This effect was largely driven by less predictable targets, marking coarse event boundaries. We interpret the P600 effect as indexing the updating of the situation model at event boundaries, consistent with Event Segmentation Theory (EST). The updating process is more demanding with coarse event boundaries, which presumably require the construction of a new situation model. Electronic supplementary material The online version of this article (10.3758/s13421-017-0766-4) contains supplementary material, which is available to authorized users.},
pubstate = {published},
type = {article}
}

Projects:   A1 C3

Ankener, Christine; Drenhaus, Heiner; Crocker, Matthew W.; Staudte, Maria

Multimodal Surprisal in the N400 and the Index of Cognitive Activity Inproceedings

Proceedings of the 40th Annual Cognitive Science Society Meeting, The Cognitive Science Society, pp. 94-100, Madison, Wisconsin, 2018.

A word’s predictability or surprisal, as determined by cloze probabilities or language models (e.g. Frank, Otten, Galli, & Vigliocco, 2015) is related to processing effort, in that less expected words take more effort to process (e.g. Hale, 2001). A word’s surprisal, however, may also be influenced by the non-linguistic context, such as visual cues: In the visual world paradigm (VWP), for example, anticipatory eye movements suggest that comprehenders exploit the scene to predict what will be mentioned next (Altmann & Kamide, 1999).

How visual context affects word surprisal and processing effort, however, remains unclear. Here, we present evidence that visually-determined probabilistic expectations for a spoken target word predict graded processing effort for that word, in both pupillometric (ICA) and ERP (N400) measures. These findings demonstrate that the non-linguistic context can immediately influence both lexical expectations, and surprisal-based processing effort.

@inproceedings{Ankener2018,
title = {Multimodal Surprisal in the N400 and the Index of Cognitive Activity},
author = {Christine Ankener and Heiner Drenhaus and Matthew W. Crocker and Maria Staudte},
url = {https://www.researchgate.net/publication/325644935_Multimodal_Surprisal_in_the_N400_and_the_Index_of_Cognitive_Activity},
year = {2018},
date = {2018},
booktitle = {Proceedings of the 40th Annual Cognitive Science Society Meeting},
pages = {94-100},
publisher = {The Cognitive Science Society},
address = {Madison, Wisconsin},
abstract = {A word’s predictability or surprisal, as determined by cloze probabilities or language models (e.g. Frank, Otten, Galli, & Vigliocco, 2015) is related to processing effort, in that less expected words take more effort to process (e.g. Hale, 2001). A words surprisal, however, may also be influenced by the non-linguistic context, such as visual cues: In the visual world paradigm (VWP), for example, anticipatory eye movements suggest that comprehenders exploit the scene to predict what will be mentioned next (Altmann & Kamide, 1999). How visual context affects word surprisal and processing effort, however, remains unclear. Here, we present evidence that visually-determined probabilistic expectations for a spoken target word predict graded processing effort for that word, in both pupillometric (ICA) and ERP (N400) measures. These findings demonstrate that the non-linguistic context can immediately influence both lexical expectations, and surprisal-based processing effort.},
pubstate = {published},
type = {inproceedings}
}

Projects:   A1 A5 C3

Raveh, Eran; Steiner, Ingmar; Gessinger, Iona; Möbius, Bernd

Studying Mutual Phonetic Influence With a Web-Based Spoken Dialogue System Inproceedings

20th International Conference on Speech and Computer (SPECOM), Leipzig, Germany, 2018.

This paper presents a study on mutual speech variation influences in a human-computer setting. The study highlights behavioral patterns in data collected as part of a shadowing experiment, and is performed using a novel end-to-end platform for studying phonetic variation in dialogue. It includes a spoken dialogue system capable of detecting and tracking the state of phonetic features in the user’s speech and adapting accordingly. It provides visual and numeric representations of the changes in real time, offering a high degree of customization, and can be used for simulating or reproducing speech variation scenarios. The replicated experiment presented in this paper along with the analysis of the relationship between the human and non-human interlocutors lays the groundwork for a spoken dialogue system with personalized speaking style, which we expect will improve the naturalness and efficiency of human-computer interaction.

@inproceedings{Raveh2018SPECOM,
title = {Studying Mutual Phonetic Influence With a Web-Based Spoken Dialogue System},
author = {Eran Raveh and Ingmar Steiner and Iona Gessinger and Bernd M{\"o}bius},
url = {https://arxiv.org/abs/1809.04945},
year = {2018},
date = {2018},
booktitle = {20th International Conference on Speech and Computer (SPECOM)},
address = {Leipzig, Germany},
abstract = {This paper presents a study on mutual speech variation influences in a human-computer setting. The study highlights behavioral patterns in data collected as part of a shadowing experiment, and is performed using a novel end-to-end platform for studying phonetic variation in dialogue. It includes a spoken dialogue system capable of detecting and tracking the state of phonetic features in the user's speech and adapting accordingly. It provides visual and numeric representations of the changes in real time, offering a high degree of customization, and can be used for simulating or reproducing speech variation scenarios. The replicated experiment presented in this paper along with the analysis of the relationship between the human and non-human interlocutors lays the groundwork for a spoken dialogue system with personalized speaking style, which we expect will improve the naturalness and efficiency of human-computer interaction.

},
pubstate = {published},
type = {inproceedings}
}

Project:   C5

Gessinger, Iona; Raveh, Eran; Möbius, Bernd; Steiner, Ingmar

Phonetic Accommodation in HCI: Introducing a Wizard-of-Oz Experiment Inproceedings

Phonetik & Phonologie 14, Vienna, Austria, 2018.

This paper discusses phonetic accommodation of 20 native German speakers interacting with the simulated spoken dialogue system Mirabella in a Wizard-of-Oz experiment. The study examines intonation of wh-questions and pronunciation of allophonic contrasts in German. In a question-and-answer exchange with the system, the users produce predominantly falling intonation patterns for wh-questions when the system does so as well. The number of rising patterns on the part of the users increases significantly when Mirabella produces questions with rising intonation. In a map task, Mirabella provides information about hidden items while producing variants of two allophonic contrasts which are dispreferred by the users. For the [ɪç] vs. [ɪk] contrast in the suffix ⟨-ig⟩, the number of dispreferred variants on the part of the users increases significantly during the map task. For the [ɛː] vs. [eː] contrast as a realization of stressed ⟨-ä-⟩, such a convergence effect is not found on the group level, yet still occurs for some individual users. Almost every user converges to the system to a substantial degree for a subset of the examined features, but we also find maintenance of preferred variants and even occasional divergence. This individual variation is in line with previous findings in accommodation research.

@inproceedings{Gessinger2018PuP,
title = {Phonetic Accommodation in HCI: Introducing a Wizard-of-Oz Experiment},
author = {Iona Gessinger and Eran Raveh and Bernd M{\"o}bius and Ingmar Steiner},
url = {https://www.coli.uni-saarland.de/~moebius/documents/gessinger_etal_is2019.pdf},
year = {2018},
date = {2018-09-06},
booktitle = {Phonetik & Phonologie 14},
address = {Vienna, Austria},
abstract = {This paper discusses phonetic accommodation of 20 native German speakers interacting with the simulated spoken dialogue system Mirabella in a Wizard-of-Oz experiment. The study examines intonation of wh-questions and pronunciation of allophonic contrasts in German. In a question-and-answer exchange with the system, the users produce predominantly falling intonation patterns for wh-questions when the system does so as well. The number of rising patterns on the part of the users increases significantly when Mirabella produces questions with rising intonation. In a map task, Mirabella provides information about hidden items while producing variants of two allophonic contrasts which are dispreferred by the users. For the [ɪç] vs. [ɪk] contrast in the suffix ⟨-ig⟩, the number of dispreferred variants on the part of the users increases significantly during the map task. For the [ɛː] vs. [eː] contrast as a realization of stressed ⟨-ä-⟩, such a convergence effect is not found on the group level, yet still occurs for some individual users. Almost every user converges to the system to a substantial degree for a subset of the examined features, but we also find maintenance of preferred variants and even occasional divergence. This individual variation is in line with previous findings in accommodation research.},
pubstate = {published},
type = {inproceedings}
}

Project:   C5

Tourtouri, Elli; Sikos, Les; Crocker, Matthew W.

Referential entropy influences the production of overspecifications Miscellaneous

10th Dubrovnik Conference on Cognitive Science, Communication, Pragmatics, and Theory of Mind (DuCog), University of Zagreb, Dubrovnik, Croatia, 2018.

@miscellaneous{Tourtourietal2018b,
title = {Referential entropy influences the production of overspecifications},
author = {Elli Tourtouri and Les Sikos and Matthew W. Crocker},
url = {https://www.mpi.nl/publications/item3310165/referential-entropy-influences-production-overspecifications},
year = {2018},
date = {2018},
booktitle = {10th Dubrovnik Conference on Cognitive Science, Communication, Pragmatics, and Theory of Mind (DuCog)},
publisher = {University of Zagreb},
address = {Dubrovnik, Croatia},
pubstate = {published},
type = {miscellaneous}
}

Project:   C3

Malisz, Zofia; Brandt, Erika; Möbius, Bernd; Oh, Yoon Mi; Andreeva, Bistra

Dimensions of segmental variability: interaction of prosody and surprisal in six languages Journal Article

Frontiers in Communication / Language Sciences, 3, pp. 1-18, 2018.

Contextual predictability variation affects phonological and phonetic structure. Reduction and expansion of acoustic-phonetic features is also characteristic of prosodic variability. In this study, we assess the impact of surprisal and prosodic structure on phonetic encoding, both independently of each other and in interaction. We model segmental duration, vowel space size and spectral characteristics of vowels and consonants as a function of surprisal as well as of syllable prominence, phrase boundary, and speech rate. Correlates of phonetic encoding density are extracted from a subset of the BonnTempo corpus for six languages: American English, Czech, Finnish, French, German, and Polish. Surprisal is estimated from segmental n-gram language models trained on large text corpora. Our findings are generally compatible with a weak version of Aylett and Turk’s Smooth Signal Redundancy hypothesis, suggesting that prosodic structure mediates between the requirements of efficient communication and the speech signal. However, this mediation is not perfect, as we found evidence for additional, direct effects of changes in surprisal on the phonetic structure of utterances. These effects appear to be stable across different speech rates.
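
For reference, the surprisal referred to in this abstract is the standard information-theoretic quantity: the negative log-probability of a segment given its preceding context, here estimated with segmental n-gram models. A minimal sketch (the base-2 logarithm and the (n-1)-segment context window are the conventional choices, not details quoted from the paper):

S(u_i) = -log2 P(u_i | u_{i-(n-1)}, ..., u_{i-1})

High-surprisal segments are thus the contextually less predictable ones, which is the sense in which information density is used here.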

@article{Malisz2018,
title = {Dimensions of segmental variability: interaction of prosody and surprisal in six languages},
author = {Zofia Malisz and Erika Brandt and Bernd M{\"o}bius and Yoon Mi Oh and Bistra Andreeva},
url = {https://www.frontiersin.org/articles/10.3389/fcomm.2018.00025/full},
doi = {https://doi.org/10.3389/fcomm.2018.00025},
year = {2018},
date = {2018-07-20},
journal = {Frontiers in Communication / Language Sciences},
pages = {1-18},
volume = {3},
number = {25},
abstract = {Contextual predictability variation affects phonological and phonetic structure. Reduction and expansion of acoustic-phonetic features is also characteristic of prosodic variability. In this study, we assess the impact of surprisal and prosodic structure on phonetic encoding, both independently of each other and in interaction. We model segmental duration, vowel space size and spectral characteristics of vowels and consonants as a function of surprisal as well as of syllable prominence, phrase boundary, and speech rate. Correlates of phonetic encoding density are extracted from a subset of the BonnTempo corpus for six languages: American English, Czech, Finnish, French, German, and Polish. Surprisal is estimated from segmental n-gram language models trained on large text corpora. Our findings are generally compatible with a weak version of Aylett and Turk's Smooth Signal Redundancy hypothesis, suggesting that prosodic structure mediates between the requirements of efficient communication and the speech signal. However, this mediation is not perfect, as we found evidence for additional, direct effects of changes in surprisal on the phonetic structure of utterances. These effects appear to be stable across different speech rates.},
pubstate = {published},
type = {article}
}

Project:   C1

Menzel, Katrin

Using diachronic corpora of scientific journal articles for complementing English corpus-based dictionaries and lexicographical resources for specialized languages Inproceedings

Proceedings of EURALEX2018, Ljubljana University Press, Faculty of Arts, Ljubljana, Slovenia, 2018, ISBN 978-961-06-0097-8.

As technology and science permeate nearly all areas of life in modern times, there is a certain trend for standard dictionaries to bolster their technical and scientific vocabulary and to identify more components, for instance more combining forms, in technical terms and terminological phrases. In this paper it is argued that recently built diachronic corpora of scientific journal articles with robust linguistic and metadata-based features are important resources for complementing English corpus-based dictionaries and lexicographical resources for specialized languages. The Royal Society Corpus (RSC, ca. 9,800 digitized texts, 32 million tokens) in combination with the Scientific Text Corpus (SciTex, ca. 5,000 documents, 39 million tokens), as two recently created corpus resources, offer the possibility to provide a fuller picture of the development of specialized vocabulary and of the number of meanings that general and technical terms have accumulated during their history. They facilitate the systematic identification of lexemes with specific linguistic characteristics or from selected disciplines and fields, and allow us to gain a better understanding of the development of academic writing in English scientific periodicals across several centuries, from their beginnings to the present day.

@inproceedings{Menzel2017b,
title = {Using diachronic corpora of scientific journal articles for complementing English corpus-based dictionaries and lexicographical resources for specialized languages},
author = {Katrin Menzel},
url = {https://euralex.org/publications/using-diachronic-corpora-of-scientific-journal-articles-for-complementing-english-corpus-based-dictionaries-and-lexicographical-resources-for-specialized-languages/},
year = {2018},
date = {2018},
booktitle = {Proceedings of EURALEX2018},
isbn = {978-961-06-0097-8},
publisher = {Ljubljana University Press, Faculty of Arts},
address = {Ljubljana, Slovenia},
abstract = {As technology and science permeate nearly all areas of life in modern times, there is a certain trend for standard dictionaries to bolster their technical and scientific vocabulary and to identify more components, for instance more combining forms, in technical terms and terminological phrases. In this paper it is argued that recently built diachronic corpora of scientific journal articles with robust linguistic and metadata-based features are important resources for complementing English corpus-based dictionaries and lexicographical resources for specialized languages. The Royal Society Corpus (RSC, ca. 9,800 digitized texts, 32 million tokens) in combination with the Scientific Text Corpus (SciTex, ca. 5,000 documents, 39 million tokens), as two recently created corpus resources, offer the possibility to provide a fuller picture of the development of specialized vocabulary and of the number of meanings that general and technical terms have accumulated during their history. They facilitate the systematic identification of lexemes with specific linguistic characteristics or from selected disciplines and fields, and allow us to gain a better understanding of the development of academic writing in English scientific periodicals across several centuries, from their beginnings to the present day.},
pubstate = {published},
type = {inproceedings}
}

Project:   B1

Yung, Frances Pik Yu; Demberg, Vera

Do speakers produce discourse connectives rationally? Inproceedings

Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing, Association for Computational Linguistics, pp. 6-16, Melbourne, Australia, 2018.

A number of different discourse connectives can be used to mark the same discourse relation, but it is unclear what factors affect connective choice. One recent account is the Rational Speech Acts theory, which predicts that speakers try to maximize the informativeness of an utterance such that the listener can interpret the intended meaning correctly. Existing prior work uses referential language games to test the rational account of speakers’ production of concrete meanings, such as identification of objects within a picture. Building on the same paradigm, we design a novel Discourse Continuation Game to investigate speakers’ production of abstract discourse relations. Experimental results reveal that speakers significantly prefer a more informative connective, in line with predictions of the RSA model.
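
The RSA prediction being tested can be made concrete with the textbook pragmatic-speaker equation (our notation, not quoted from the paper): the speaker chooses an utterance u, here a connective, for an intended discourse relation m with probability

P_S1(u | m) ∝ exp( α · ( log P_L0(m | u) − cost(u) ) )

where P_L0 is a literal listener's interpretation distribution and α a rationality parameter. Under this model the more informative connective, i.e. the one that makes the intended relation easier for the listener to recover, is preferred, which is the pattern the experiments report.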

@inproceedings{Yung2019b,
title = {Do speakers produce discourse connectives rationally?},
author = {Frances Pik Yu Yung and Vera Demberg},
url = {https://aclanthology.org/W18-2802},
doi = {https://doi.org/10.18653/v1/W18-2802},
year = {2018},
date = {2018},
booktitle = {Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing},
pages = {6-16},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {A number of different discourse connectives can be used to mark the same discourse relation, but it is unclear what factors affect connective choice. One recent account is the Rational Speech Acts theory, which predicts that speakers try to maximize the informativeness of an utterance such that the listener can interpret the intended meaning correctly. Existing prior work uses referential language games to test the rational account of speakers{'} production of concrete meanings, such as identification of objects within a picture. Building on the same paradigm, we design a novel Discourse Continuation Game to investigate speakers{'} production of abstract discourse relations. Experimental results reveal that speakers significantly prefer a more informative connective, in line with predictions of the RSA model.},
pubstate = {published},
type = {inproceedings}
}

Project:   B2

Gessinger, Iona; Schweitzer, Antje; Andreeva, Bistra; Raveh, Eran; Möbius, Bernd; Steiner, Ingmar

Convergence of Pitch Accents in a Shadowing Task Inproceedings

Proceedings of the 9th International Conference on Speech Prosody, Speech Prosody Special Interest Group, pp. 225-229, Poznań, Poland, 2018.

In the present study, a corpus of short German sentences collected in a shadowing task was examined with respect to pitch accent realization. The pitch accents were parameterized with the PaIntE model, which describes the f0 contour of intonation events concerning their height, slope, and temporal alignment. Convergence was quantified as decrease in Euclidean distance, and hence increase in similarity, between the PaIntE parameter vectors. This was assessed for three stimulus types: natural speech, diphone based speech synthesis, or HMM based speech synthesis. The factors tested in the analysis were experimental phase – was the sentence uttered before or while shadowing the model, accent type – a distinction was made between prenuclear and nuclear pitch accents, and sex of speaker and shadowed model. For the natural and HMM stimuli, Euclidean distance decreased in the shadowing task. This convergence effect did not depend on the accent type. However, prenuclear pitch accents showed generally lower values in Euclidean distance than nuclear pitch accents. Whether the sex of the speaker and the shadowed model matched did not explain any variance in the data. For the diphone stimuli, no convergence of pitch accents was observed.
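
Concretely, if p and q are the PaIntE parameter vectors of the model speaker's pitch accent and the shadower's realization, the similarity measure is the ordinary Euclidean distance

d(p, q) = sqrt( Σ_k (p_k − q_k)² )

and convergence corresponds to d being smaller while shadowing than before shadowing. The formula is the generic definition; the exact PaIntE parameterization and any normalization follow the paper.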

@inproceedings{Gessinger2018SP,
title = {Convergence of Pitch Accents in a Shadowing Task},
author = {Iona Gessinger and Antje Schweitzer and Bistra Andreeva and Eran Raveh and Bernd M{\"o}bius and Ingmar Steiner},
url = {https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/29618},
doi = {https://doi.org/10.21437/SpeechProsody.2018-46},
year = {2018},
date = {2018},
booktitle = {Proceedings of the 9th International Conference on Speech Prosody},
pages = {225-229},
publisher = {Speech Prosody Special Interest Group},
address = {Pozna{\'n}, Poland},
abstract = {In the present study, a corpus of short German sentences collected in a shadowing task was examined with respect to pitch accent realization. The pitch accents were parameterized with the PaIntE model, which describes the f0 contour of intonation events concerning their height, slope, and temporal alignment. Convergence was quantified as decrease in Euclidean distance, and hence increase in similarity, between the PaIntE parameter vectors. This was assessed for three stimulus types: natural speech, diphone based speech synthesis, or HMM based speech synthesis. The factors tested in the analysis were experimental phase - was the sentence uttered before or while shadowing the model, accent type - a distinction was made between prenuclear and nuclear pitch accents, and sex of speaker and shadowed model. For the natural and HMM stimuli, Euclidean distance decreased in the shadowing task. This convergence effect did not depend on the accent type. However, prenuclear pitch accents showed generally lower values in Euclidean distance than nuclear pitch accents. Whether the sex of the speaker and the shadowed model matched did not explain any variance in the data. For the diphone stimuli, no convergence of pitch accents was observed.},
pubstate = {published},
type = {inproceedings}
}

Project:   C5

Sanders, Ted J. M.; Demberg, Vera; Hoek, Jet; Scholman, Merel; Torabi Asr, Fatemeh; Zufferey, Sandrine; Evers-Vermeul, Jacqueline

Unifying dimensions in coherence relations: How various annotation frameworks are related Journal Article

Corpus Linguistics and Linguistic Theory, 2018.

In this paper, we show how three often used and seemingly different discourse annotation frameworks – Penn Discourse Treebank (PDTB), Rhetorical Structure Theory (RST), and Segmented Discourse Representation Theory – can be related by using a set of unifying dimensions. These dimensions are taken from the Cognitive approach to Coherence Relations and combined with more fine-grained additional features from the frameworks themselves to yield a posited set of dimensions that can successfully map three frameworks. The resulting interface will allow researchers to find identical or at least closely related relations within sets of annotated corpora, even if they are annotated within different frameworks. Furthermore, we tested our unified dimension (UniDim) approach by comparing PDTB and RST annotations of identical newspaper texts and converting their original end label annotations of relations into the accompanying values per dimension. Subsequently, rates of overlap in the attributed values per dimension were analyzed. Results indicate that the proposed dimensions indeed create an interface that makes existing annotation systems “talk to each other.”

@article{Sanders2018,
title = {Unifying dimensions in coherence relations: How various annotation frameworks are related},
author = {Ted J. M. Sanders and Vera Demberg and Jet Hoek and Merel Scholman and Fatemeh Torabi Asr and Sandrine Zufferey and Jacqueline Evers-Vermeul},
url = {https://www.degruyter.com/document/doi/10.1515/cllt-2016-0078/html},
doi = {https://doi.org/10.1515/cllt-2016-0078},
year = {2018},
date = {2018-05-22},
journal = {Corpus Linguistics and Linguistic Theory},
abstract = {In this paper, we show how three often used and seemingly different discourse annotation frameworks – Penn Discourse Treebank (PDTB), Rhetorical Structure Theory (RST), and Segmented Discourse Representation Theory – can be related by using a set of unifying dimensions. These dimensions are taken from the Cognitive approach to Coherence Relations and combined with more fine-grained additional features from the frameworks themselves to yield a posited set of dimensions that can successfully map three frameworks. The resulting interface will allow researchers to find identical or at least closely related relations within sets of annotated corpora, even if they are annotated within different frameworks. Furthermore, we tested our unified dimension (UniDim) approach by comparing PDTB and RST annotations of identical newspaper texts and converting their original end label annotations of relations into the accompanying values per dimension. Subsequently, rates of overlap in the attributed values per dimension were analyzed. Results indicate that the proposed dimensions indeed create an interface that makes existing annotation systems “talk to each other.”},
pubstate = {published},
type = {article}
}

Project:   B2

Steiner, Ingmar; Le Maguer, Sébastien

Creating New Language and Voice Components for the Updated MaryTTS Text-to-Speech Synthesis Platform Inproceedings

11th Language Resources and Evaluation Conference (LREC), pp. 3171-3175, Miyazaki, Japan, 2018.

We present a new workflow to create components for the MaryTTS text-to-speech synthesis platform, which is popular with researchers and developers, extending it to support new languages and custom synthetic voices. This workflow replaces the previous toolkit with an efficient, flexible process that leverages modern build automation and cloud-hosted infrastructure. Moreover, it is compatible with the updated MaryTTS architecture, enabling new features and state-of-the-art paradigms such as synthesis based on deep neural networks (DNNs). Like MaryTTS itself, the new tools are free, open source software (FOSS), and promote the use of open data.

@inproceedings{Steiner2018LREC,
title = {Creating New Language and Voice Components for the Updated MaryTTS Text-to-Speech Synthesis Platform},
author = {Ingmar Steiner and S{\'e}bastien Le Maguer},
url = {https://arxiv.org/abs/1712.04787},
year = {2018},
date = {2018-05-10},
booktitle = {11th Language Resources and Evaluation Conference (LREC)},
pages = {3171-3175},
address = {Miyazaki, Japan},
abstract = {We present a new workflow to create components for the MaryTTS text-to-speech synthesis platform, which is popular with researchers and developers, extending it to support new languages and custom synthetic voices. This workflow replaces the previous toolkit with an efficient, flexible process that leverages modern build automation and cloud-hosted infrastructure. Moreover, it is compatible with the updated MaryTTS architecture, enabling new features and state-of-the-art paradigms such as synthesis based on deep neural networks (DNNs). Like MaryTTS itself, the new tools are free, open source software (FOSS), and promote the use of open data.},
pubstate = {published},
type = {inproceedings}
}

Project:   C5

Zimmerer, Frank; Brandt, Erika; Andreeva, Bistra; Möbius, Bernd

Idiomatic or literal? Production of collocations in German read speech Inproceedings

Proc. Speech Prosody 2018, pp. 428-432, Poznań, 2018.

Collocations have been identified as an interesting field to study the effects of frequency of occurrence in language and speech. We report results of a production experiment including a duration analysis based on the production of German collocations. The collocations occurred in a condition where the phrase was produced with a literal meaning and in another condition where it was idiomatic. A durational difference was found for the collocations, which were reduced in the idiomatic condition. This difference was also observed for the function word und (‘and’) in collocations like Mord und Totschlag (‘murder and manslaughter’). However, an analysis of the vowel /U/ of the function word did not show a durational difference. Some explanations as to why speakers showed different patterns of reduction (not all collocations were produced with a shorter duration in the idiomatic condition by all speakers) and why not all speakers use the durational cue (one out of eight speakers produced the conditions identically) are proposed.

@inproceedings{Zimmerer2018SpPro,
title = {Idiomatic or literal? Production of collocations in German read speech},
author = {Frank Zimmerer and Erika Brandt and Bistra Andreeva and Bernd M{\"o}bius},
url = {https://www.isca-speech.org/archive/speechprosody_2018/zimmerer18_speechprosody.html},
doi = {https://doi.org/10.21437/SpeechProsody.2018-87},
year = {2018},
date = {2018},
booktitle = {Proc. Speech Prosody 2018},
pages = {428-432},
address = {Pozna{\'n}},
abstract = {Collocations have been identified as an interesting field to study the effects of frequency of occurrence in language and speech. We report results of a production experiment including a duration analysis based on the production of German collocations. The collocations occurred in a condition where the phrase was produced with a literal meaning and in another condition where it was idiomatic. A durational difference was found for the collocations, which were reduced in the idiomatic condition. This difference was also observed for the function word und (‘and’) in collocations like Mord und Totschlag (‘murder and manslaughter’). However, an analysis of the vowel /U/ of the function word did not show a durational difference. Some explanations as to why speakers showed different patterns of reduction (not all collocations were produced with a shorter duration in the idiomatic condition by all speakers) and why not all speakers use the durational cue (one out of eight speakers produced the conditions identically) are proposed.},
pubstate = {published},
type = {inproceedings}
}

Project:   C1

Brandt, Erika; Zimmerer, Frank; Andreeva, Bistra; Möbius, Bernd

Impact of prosodic structure and information density on dynamic formant trajectories in German Inproceedings

Klessa, Katarzyna; Bachan, Jolanta; Wagner, Agnieszka; Karpiński, Maciej; Śledziński, Daniel (Ed.): Speech Prosody 2018, Speech Prosody Special Interest Group, pp. 119-123, Urbana, 2018, ISSN 2333-2042.

This study investigated the influence of prosodic structure and information density (ID), defined as contextual predictability, on vowel-inherent spectral change (VISC). We extracted formant measurements from the onset and offset of the vowels of a large German corpus of newspaper read speech. Vector length (VL), the Euclidean distance between F1 and F2 trajectory, and F1 and F2 slope, formant deltas of onset and offset relative to vowel duration, were calculated as measures of formant change. ID factors were word frequency and phoneme-based surprisal measures, while the prosodic factors contained global and local articulation rate, primary lexical stress, and prosodic boundary. We expected that vowels increased in spectral change when they were difficult to predict from the context, or stood in low-frequency words while controlling for known effects of prosodic structure. The ID effects were assumed to be modulated by prosodic factors to a certain extent. We confirmed our hypotheses for VL, and found expected independent effects of prosody and ID on F1 slope and F2 slope.
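
Written out in terms of the formant values measured at vowel onset and offset (notation ours, measures as defined verbally above):

VL = sqrt( (F1_off − F1_on)² + (F2_off − F2_on)² )
slope_Fk = (Fk_off − Fk_on) / vowel duration,   k = 1, 2

so larger VL and steeper slopes indicate more vowel-inherent spectral change.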

@inproceedings{Brandt2018SpPro,
title = {Impact of prosodic structure and information density on dynamic formant trajectories in German},
author = {Erika Brandt and Frank Zimmerer and Bistra Andreeva and Bernd M{\"o}bius},
editor = {Katarzyna Klessa and Jolanta Bachan and Agnieszka Wagner and Maciej Karpiński and Daniel Śledziński},
url = {https://www.researchgate.net/publication/325744530_Impact_of_prosodic_structure_and_information_density_on_dynamic_formant_trajectories_in_German},
doi = {https://doi.org/10.22028/D291-32050},
year = {2018},
date = {2018},
booktitle = {Speech Prosody 2018},
issn = {2333-2042},
pages = {119-123},
publisher = {Speech Prosody Special Interest Group},
address = {Urbana},
abstract = {This study investigated the influence of prosodic structure and information density (ID), defined as contextual predictability, on vowel-inherent spectral change (VISC). We extracted formant measurements from the onset and offset of the vowels of a large German corpus of newspaper read speech. Vector length (VL), the Euclidean distance between F1 and F2 trajectory, and F1 and F2 slope, formant deltas of onset and offset relative to vowel duration, were calculated as measures of formant change. ID factors were word frequency and phoneme-based surprisal measures, while the prosodic factors contained global and local articulation rate, primary lexical stress, and prosodic boundary. We expected that vowels increased in spectral change when they were difficult to predict from the context, or stood in low-frequency words while controlling for known effects of prosodic structure. The ID effects were assumed to be modulated by prosodic factors to a certain extent. We confirmed our hypotheses for VL, and found expected independent effects of prosody and ID on F1 slope and F2 slope.},
pubstate = {published},
type = {inproceedings}
}

Project:   C1

Shen, Xiaoyu; Su, Hui; Niu, Shuzi; Demberg, Vera

Improving Variational Encoder-Decoders in Dialogue Generation Inproceedings

32nd AAAI Conference on Artificial Intelligence (AAAI-18), New Orleans, USA, 2018.

Variational encoder-decoders (VEDs) have shown promising results in dialogue generation. However, the latent variable distributions are usually approximated by a much simpler model than the powerful RNN structure used for encoding and decoding, yielding the KL-vanishing problem and inconsistent training objective. In this paper, we separate the training step into two phases: The first phase learns to autoencode discrete texts into continuous embeddings, from which the second phase learns to generalize latent representations by reconstructing the encoded embedding. In this case, latent variables are sampled by transforming Gaussian noise through multi-layer perceptrons and are trained with a separate VED model, which has the potential of realizing a much more flexible distribution. We compare our model with current popular models and the experiment demonstrates substantial improvement in both metric-based and human evaluations.
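
The KL-vanishing problem mentioned here concerns the standard variational objective (the ELBO), which for an input x and latent variable z lower-bounds the data likelihood as

log p(x) ≥ E_{q(z|x)}[ log p(x | z) ] − KL( q(z | x) || p(z) )

With a powerful autoregressive RNN decoder, this bound can be optimized while the KL term is driven to nearly zero, so z carries no information about x; the two-phase training scheme described above is motivated by avoiding that collapse.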

@inproceedings{Shen2018,
title = {Improving Variational Encoder-Decoders in Dialogue Generation},
author = {Xiaoyu Shen and Hui Su and Shuzi Niu and Vera Demberg},
url = {https://arxiv.org/abs/1802.02032},
year = {2018},
date = {2018-02-02},
booktitle = {32nd AAAI Conference on Artificial Intelligence (AAAI-18)},
address = {New Orleans, USA},
abstract = {Variational encoder-decoders (VEDs) have shown promising results in dialogue generation. However, the latent variable distributions are usually approximated by a much simpler model than the powerful RNN structure used for encoding and decoding, yielding the KL-vanishing problem and inconsistent training objective. In this paper, we separate the training step into two phases: The first phase learns to autoencode discrete texts into continuous embeddings, from which the second phase learns to generalize latent representations by reconstructing the encoded embedding. In this case, latent variables are sampled by transforming Gaussian noise through multi-layer perceptrons and are trained with a separate VED model, which has the potential of realizing a much more flexible distribution. We compare our model with current popular models and the experiment demonstrates substantial improvement in both metric-based and human evaluations.},
pubstate = {published},
type = {inproceedings}
}

Project:   A4

Steiner, Ingmar; Le Maguer, Sébastien; Hewer, Alexander

Synthesis of Tongue Motion and Acoustics from Text using a Multimodal Articulatory Database Journal Article

IEEE/ACM Transactions on Audio, Speech, and Language Processing, 25, pp. 2351-2361, 2017.

We present an end-to-end text-to-speech (TTS) synthesis system that generates audio and synchronized tongue motion directly from text. This is achieved by adapting a 3D model of the tongue surface to an articulatory dataset and training a statistical parametric speech synthesis system directly on the tongue model parameters. We evaluate the model at every step by comparing the spatial coordinates of predicted articulatory movements against the reference data. The results indicate a global mean Euclidean distance of less than 2.8 mm, and our approach can be adapted to add an articulatory modality to conventional TTS applications without the need for extra data.

@article{Steiner2017TASLP,
title = {Synthesis of Tongue Motion and Acoustics from Text using a Multimodal Articulatory Database},
author = {Ingmar Steiner and S{\'e}bastien Le Maguer and Alexander Hewer},
url = {https://arxiv.org/abs/1612.09352},
year = {2017},
date = {2017},
journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
pages = {2351-2361},
volume = {25},
number = {12},
abstract = {We present an end-to-end text-to-speech (TTS) synthesis system that generates audio and synchronized tongue motion directly from text. This is achieved by adapting a 3D model of the tongue surface to an articulatory dataset and training a statistical parametric speech synthesis system directly on the tongue model parameters. We evaluate the model at every step by comparing the spatial coordinates of predicted articulatory movements against the reference data. The results indicate a global mean Euclidean distance of less than 2.8 mm, and our approach can be adapted to add an articulatory modality to conventional TTS applications without the need for extra data.},
pubstate = {published},
type = {article}
}

Project:   C5

Fischer, Andrea; Vreeken, Jilles; Klakow, Dietrich

Beyond Pairwise Similarity: Quantifying and Characterizing Linguistic Similarity between Groups of Languages by MDL Journal Article

Computación y Sistemas, 21, pp. 829-839, 2017.

We present a minimum description length based algorithm for finding the regular correspondences between related languages and show how it can be used to quantify the similarity between not only pairs, but whole groups of languages directly from cognate sets. We employ a two-part code, which allows to use the data and model complexity of the discovered correspondences as information-theoretic quantifications of the degree of regularity of cognate realizations in these languages. Unlike previous work, our approach is not limited to pairs of languages, does not limit the size of discovered correspondences, does not make assumptions about the shape or distribution of correspondences, and requires no expert knowledge or fine-tuning of parameters. We here test our approach on the Slavic languages. In a pairwise analysis of 13 Slavic languages, we show that our algorithm replicates their linguistic classification exactly. In a four-language experiment, we demonstrate how our algorithm efficiently quantifies similarity between all subsets of the analyzed four languages and find that it is excellently suited to quantifying the orthographic regularity of closely-related languages.
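
The two-part code mentioned in the abstract is the classic MDL criterion: a set of correspondences M is scored by the bits needed to describe the model itself plus the bits needed to describe the cognate data D given that model,

L(M, D) = L(M) + L(D | M)

and the M minimizing this total is selected; L(M) and L(D | M) then serve as the model- and data-complexity quantities used to quantify regularity between the languages.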

@article{Fischer2017,
title = {Beyond Pairwise Similarity: Quantifying and Characterizing Linguistic Similarity between Groups of Languages by MDL},
author = {Andrea Fischer and Jilles Vreeken and Dietrich Klakow},
url = {http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/2865},
year = {2017},
date = {2017},
journal = {Computación y Sistemas},
pages = {829-839},
volume = {21},
number = {4},
abstract = {

We present a minimum description length based algorithm for finding the regular correspondences between related languages and show how it can be used to quantify the similarity between not only pairs, but whole groups of languages directly from cognate sets. We employ a two-part code, which allows to use the data and model complexity of the discovered correspondences as information-theoretic quantifications of the degree of regularity of cognate realizations in these languages. Unlike previous work, our approach is not limited to pairs of languages, does not limit the size of discovered correspondences, does not make assumptions about the shape or distribution of correspondences, and requires no expert knowledge or fine-tuning of parameters. We here test our approach on the Slavic languages. In a pairwise analysis of 13 Slavic languages, we show that our algorithm replicates their linguistic classification exactly. In a four-language experiment, we demonstrate how our algorithm efficiently quantifies similarity between all subsets of the analyzed four languages and find that it is excellently suited to quantifying the orthographic regularity of closely-related languages.
},
pubstate = {published},
type = {article}
}

Project:   C4
