@inproceedings{Oualil2017b,
  title     = {A neural network approach for mixing language models},
  author    = {Youssef Oualil and Dietrich Klakow},
  url       = {https://arxiv.org/abs/1708.06989},
  year      = {2017},
  date      = {2017},
  booktitle = {ICASSP 2017},
  abstract  = {The performance of Neural Network (NN)-based language models is steadily improving due to the emergence of new architectures, which are able to learn different natural language characteristics. This paper presents a novel framework, which shows that a significant improvement can be achieved by combining different existing heterogeneous models in a single architecture. This is done through 1) a feature layer, which separately learns different NN-based models and 2) a mixture layer, which merges the resulting model features. In doing so, this architecture benefits from the learning capabilities of each model with no noticeable increase in the number of model parameters or the training time. Extensive experiments conducted on the Penn Treebank (PTB) and the Large Text Compression Benchmark (LTCB) corpus showed a significant reduction of the perplexity when compared to state-of-the-art feedforward as well as recurrent neural network architectures.},
  pubstate  = {published},
  type      = {inproceedings}
}
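
The abstract describes a two-part architecture: a feature layer that learns several heterogeneous NN language models side by side, and a mixture layer that merges their features before the output softmax. The sketch below illustrates that idea only; it is not the authors' implementation, and all names, dimensions, the choice of an LSTM plus fixed-context feedforward branch, and the use of PyTorch are assumptions for illustration.

  # Minimal sketch (assumed structure, not the paper's code): two sub-model
  # feature extractors feed one mixture layer and a shared output layer.
  import torch
  import torch.nn as nn

  class MixtureLM(nn.Module):
      def __init__(self, vocab_size=10000, emb_dim=200, hidden_dim=200, context=4):
          super().__init__()
          self.embed = nn.Embedding(vocab_size, emb_dim)
          # Feature layer: two heterogeneous branches trained jointly.
          self.recurrent = nn.LSTM(emb_dim, hidden_dim, batch_first=True)  # recurrent branch
          self.feedforward = nn.Linear(context * emb_dim, hidden_dim)      # fixed-context FNN branch
          self.context = context
          # Mixture layer: merges the concatenated branch features.
          self.mixture = nn.Linear(2 * hidden_dim, hidden_dim)
          self.out = nn.Linear(hidden_dim, vocab_size)

      def forward(self, tokens):                       # tokens: (batch, seq_len), seq_len >= context
          emb = self.embed(tokens)                     # (batch, seq_len, emb_dim)
          rec_feat, _ = self.recurrent(emb)            # (batch, seq_len, hidden_dim)
          rec_feat = rec_feat[:, -1, :]                # recurrent feature at the last position
          ff_feat = torch.tanh(
              self.feedforward(emb[:, -self.context:, :].flatten(1)))
          merged = torch.tanh(self.mixture(torch.cat([rec_feat, ff_feat], dim=-1)))
          return self.out(merged)                      # next-word logits

  # Usage: next-word logits for a batch of word-id contexts of length >= 4.
  model = MixtureLM()
  logits = model(torch.randint(0, 10000, (8, 6)))      # shape (8, vocab_size)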