2013.bib

@comment{{This file has been generated by bib2bib 1.92}}
@comment{{Command line: /home/korin/bibtex2html-1.92-LINUX/bib2bib -oc /home/korin/projects/publications/new_output/transitdata/2013-citations -ob /home/korin/projects/publications/new_output/transitdata/2013.bib -c 'year : "2013"' /home/korin/projects/publications/filtlists/full_publications_list.bib}}
@phdthesis{Cassia_PhD13,
  author = {Valentini-Botinhao, Cassia},
  school = {University of Edinburgh},
  title = {Intelligibility enhancement of synthetic speech in noise},
  abstract = {Speech technology can facilitate human-machine interaction and create new communication interfaces. Text-To-Speech (TTS) systems provide speech output for dialogue, notification and reading applications as well as personalized voices for people that have lost the use of their own. TTS systems are built to produce synthetic voices that should sound as natural, expressive and intelligible as possible and if necessary be similar to a particular speaker. Although naturalness is an important requirement, providing the correct information in adverse conditions can be crucial to certain applications. Speech that adapts or reacts to different listening conditions can in turn be more expressive and natural. In this work we focus on enhancing the intelligibility of TTS voices in additive noise. For that we adopt the statistical parametric paradigm for TTS in the shape of a hidden Markov model (HMM-) based speech synthesis system that allows for flexible enhancement strategies. Little is known about which human speech production mechanisms actually increase intelligibility in noise and how the choice of mechanism relates to noise type, so we approached the problem from another perspective: using mathematical models for hearing speech in noise. To find which models are better at predicting intelligibility of TTS in noise we performed listening evaluations to collect subjective intelligibility scores which we then compared to the models’ predictions. In these evaluations we observed that modifications performed on the spectral envelope of speech can increase intelligibility significantly, particularly if the strength of the modification depends on the noise and its level. We used these findings to inform the decision of which of the models to use when automatically modifying the spectral envelope of the speech according to the noise. We devised two methods, both involving cepstral coefficient modifications. The first was applied during extraction while training the acoustic models and the other when generating a voice using pre-trained TTS models. The latter has the advantage of being able to address fluctuating noise. To increase intelligibility of synthetic speech at generation time we proposed a method for Mel cepstral coefficient modification based on the glimpse proportion measure, the most promising of the models of speech intelligibility that we evaluated. An extensive series of listening experiments demonstrated that this method brings significant intelligibility gains to TTS voices while not requiring additional recordings of clear or Lombard speech. To further improve intelligibility we combined our method with noise-independent enhancement approaches based on the acoustics of highly intelligible speech. This combined solution was as effective for stationary noise as for the challenging competing speaker scenario, obtaining up to 4dB of equivalent intensity gain. Finally, we proposed an extension to the speech enhancement paradigm to account for not only energetic masking of signals but also for linguistic confusability of words in sentences. We found that word level confusability, a challenging value to predict, can be used as an additional prior to increase intelligibility even for simple enhancement methods like energy reallocation between words. These findings motivate further research into solutions that can tackle the effect of energetic masking on the auditory system as well as on higher levels of processing.},
  year = {2013},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2013/Cassia_PhD13.pdf},
  categories = {speech synthesis, speech intelligibility in noise}
}
@inproceedings{Cassia_IS13,
  author = {Valentini-Botinhao, C. and Yamagishi, J. and King, S. and Stylianou, Y.},
  title = {{Combining perceptually-motivated spectral shaping with loudness and duration modification for intelligibility enhancement of HMM-based synthetic speech in noise}},
  booktitle = {Proc. Interspeech},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2013/Cassia_IS13.pdf},
  abstract = {This paper presents our entry to a speech-in-noise intelligibility enhancement evaluation: the Hurricane Challenge. The system consists of a Text-To-Speech voice manipulated through a combination of enhancement strategies, each of which is known to be individually successful: a perceptually-motivated spectral shaper based on the Glimpse Proportion measure, dynamic range compression, and adaptation to Lombard excitation and duration patterns. We achieved substantial intelligibility improvements relative to unmodified synthetic speech: 4.9 dB in competing speaker and 4.1 dB in speech-shaped noise. An analysis conducted across this and two other similar evaluations shows that the spectral shaper and the compressor (both of which are loudness boosters) contribute most under higher SNR conditions, particularly for speech-shaped noise. Duration and excitation Lombard-adapted changes are more beneficial in lower SNR conditions, and for competing speaker noise.}
}
@inproceedings{Cooke_IS13,
  author = {Cooke, M. and Mayo, C. and Valentini-Botinhao, C.},
  title = {{Intelligibility-enhancing speech modifications: the Hurricane Challenge}},
  booktitle = {Proc. Interspeech},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2013/Cooke_IS13.pdf},
  abstract = {Speech output is used extensively, including in situations where correct message reception is threatened by adverse listening conditions. Recently, there has been a growing interest in algorithmic modifications that aim to increase the intelligibility of both natural and synthetic speech when presented in noise. The Hurricane Challenge is the first large-scale open evaluation of algorithms designed to enhance speech intelligibility. Eighteen systems operating on a common data set were subjected to extensive listening tests and compared to unmodified natural and text-to-speech (TTS) baselines. The best-performing systems achieved gains over unmodified natural speech of 4.4 and 5.1 dB in competing speaker and stationary noise respectively, while TTS systems made gains of 5.6 and 5.1 dB over their baseline. Surprisingly, for most conditions the largest gains were observed for noise-independent algorithms, suggesting that performance in this task can be further improved by exploiting information in the masking signal.}
}
@inproceedings{Cassia_ICASSP13,
  author = {Valentini-Botinhao, C. and Godoy, E. and Stylianou, Y. and Sauert, B. and King, S. and Yamagishi, J.},
  title = {{Improving intelligibility in noise of HMM-generated speech via noise-dependent and -independent methods}},
  booktitle = {Proc. ICASSP},
  year = {2013},
  month = {May},
  address = {Vancouver, Canada},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Cassia_ICASSP13.pdf},
  abstract = {In order to improve the intelligibility of HMM-generated Text-to-Speech (TTS) in noise, this work evaluates several speech enhancement methods, exploring combinations of noise-independent and -dependent approaches as well as algorithms previously developed for natural speech. We evaluate one noise-dependent method proposed for TTS, based on the glimpse proportion measure, and three approaches originally proposed for natural speech - one that estimates the noise and is based on the speech intelligibility index, and two noise-independent methods based on different spectral shaping techniques followed by dynamic range compression. We demonstrate how these methods influence the average spectra for different phone classes. We then present results of a listening experiment with speech-shaped noise and a competing speaker. A few methods made the TTS voice even more intelligible than the natural one. Although noise-dependent methods did not improve gains, the intelligibility differences found in distinct noises motivate such dependency.}
}
@inproceedings{Tang_SPIN13,
  author = {Tang, Y. and Cooke, M. and Valentini-Botinhao, C.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Tang_SPIN13.pdf},
  booktitle = {Proc. SPIN},
  year = {2013},
  title = {A distortion-weighted glimpse-based intelligibility metric for modified and synthetic speech}
}
@article{Cooke_SPCOM13,
  author = {Cooke, M. and Mayo, C. and Valentini-Botinhao, C. and Stylianou, Y. and Sauert, B. and Tang, Y.},
  title = {Evaluating the intelligibility benefit of speech modifications in known noise conditions},
  journal = {Speech Communication},
  pages = {572--585},
  volume = {55},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2012/Cooke_SPCOM13.pdf},
  issue = {4},
  abstract = {The use of live and recorded speech is widespread in applications where correct message reception is important. Furthermore, the deployment of synthetic speech in such applications is growing. Modifications to natural and synthetic speech have therefore been proposed which aim at improving intelligibility in noise. The current study compares the benefits of speech modification algorithms in a large-scale speech intelligibility evaluation and quantifies the equivalent intensity change, defined as the amount in decibels that unmodified speech would need to be adjusted by in order to achieve the same intelligibility as modified speech. Listeners identified keywords in phonetically-balanced sentences representing ten different types of speech: plain and Lombard speech, five types of modified speech, and three forms of synthetic speech. Sentences were masked by either a stationary or a competing speech masker. Modification methods varied in the manner and degree to which they exploited estimates of the masking noise. The best-performing modifications led to equivalent intensity changes of around 5 dB in moderate and high noise levels for the stationary masker, and 3--4 dB in the presence of competing speech. These gains exceed those produced by Lombard speech. Synthetic speech in noise was always less intelligible than plain natural speech, but modified synthetic speech reduced this deficit by a significant amount.}
}
@article{lu2013,
  author = {Lu, Liang and Chin, KK and Ghoshal, Arnab and Renals, Steve},
  doi = {10.1109/TASL.2013.2248718},
  title = {Joint Uncertainty Decoding for Noise Robust Subspace {Gaussian} Mixture Models},
  journal = {IEEE Transactions on Audio, Speech and Language Processing},
  number = {9},
  abstract = {Joint uncertainty decoding (JUD) is a model-based noise compensation technique for conventional Gaussian Mixture Model (GMM) based speech recognition systems. Unlike vector Taylor series (VTS) compensation which operates on the individual Gaussian components in an acoustic model, JUD clusters the Gaussian components into a smaller number of classes, sharing the compensation parameters for the set of Gaussians in a given class. This significantly reduces the computational cost. In this paper, we investigate noise compensation for subspace Gaussian mixture model (SGMM) based speech recognition systems using JUD. The total number of Gaussian components in an SGMM is typically very large. Therefore direct compensation of the individual Gaussian components, as performed by VTS, is computationally expensive. In this paper we show that JUD-based noise compensation can be successfully applied to SGMMs in a computationally efficient way. We evaluate the JUD/SGMM technique on the standard Aurora 4 corpus. Our experimental results indicate that the JUD/SGMM system results in lower word error rates compared with a conventional GMM system with either VTS-based or JUD-based noise compensation.},
  volume = {21},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/liang-taslp12-noise.pdf},
  pages = {1791--1804}
}
@inproceedings{Swietojanski:ICASSP13,
  author = {Swietojanski, Pawel and Ghoshal, Arnab and Renals, Steve},
  doi = {10.1109/ICASSP.2013.6638967},
  title = {Revisiting Hybrid and {GMM-HMM} system combination techniques},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Swietojanski_ICASSP2013.pdf},
  abstract = {In this paper we investigate techniques to combine hybrid HMM-DNN (hidden Markov model -- deep neural network) and tandem HMM-GMM (hidden Markov model -- Gaussian mixture model) acoustic models using: (1) model averaging, and (2) lattice combination with Minimum Bayes Risk decoding. We have performed experiments on the ``TED Talks'' task following the protocol of the IWSLT-2012 evaluation. Our experimental results suggest that DNN-based and GMM-based acoustic models are complementary, with error rates being reduced by up to 8% relative when the DNN and GMM systems are combined at model-level in a multi-pass automatic speech recognition (ASR) system. Additionally, further gains were obtained by combining model-averaged lattices with the one obtained from baseline systems.},
  categories = {deep neural networks, tandem, hybrid, system combination, TED}
}
@inproceedings{Ghoshal:ICASSP13,
  author = {Ghoshal, Arnab and Swietojanski, Pawel and Renals, Steve},
  doi = {10.1109/ICASSP.2013.6639084},
  title = {Multilingual training of deep neural networks},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Ghoshal_ICASSP2013.pdf},
  abstract = {We investigate multilingual modeling in the context of a deep neural network (DNN) -- hidden Markov model (HMM) hybrid, where the DNN outputs are used as the HMM state likelihoods. By viewing neural networks as a cascade of feature extractors followed by a logistic regression classifier, we hypothesise that the hidden layers, which act as feature extractors, will be transferable between languages. As a corollary, we propose that training the hidden layers on multiple languages makes them more suitable for such cross-lingual transfer. We experimentally confirm these hypotheses on the GlobalPhone corpus using seven languages from three different language families: Germanic, Romance, and Slavic. The experiments demonstrate substantial improvements over a monolingual DNN-HMM hybrid baseline, and hint at avenues of further exploration.},
  categories = {Speech recognition, deep learning, neural networks, multilingual modeling}
}
@conference{Heng13,
  author = {Lu, H. and King, S.},
  title = {Factorized context modelling for text-to-speech synthesis},
  abstract = {Because speech units are so context-dependent, a large number of linguistic context features are generally used by HMM-based Text-to-Speech (TTS) speech synthesis systems, via context-dependent models. Since it is impossible to train separate models for every context, decision trees are used to discover the most important combinations of features that should be modelled. The task of the decision tree is very hard: to generalize from a very small observed part of the context feature space to the rest. Decision trees also have a major weakness: they cannot directly take advantage of factorial properties, because they subdivide the model space based on one feature at a time. We propose a Dynamic Bayesian Network (DBN) based Mixed Memory Markov Model (MMMM) to provide factorization of the context space. The results of a listening test are provided as evidence that the model successfully learns the factorial nature of this space.},
  year = {2013},
  month = {May},
  address = {Vancouver, Canada},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/IC13HengSimon.pdf},
  booktitle = {Proc. ICASSP}
}
@article{HTS,
  author = {Tokuda, Keiichi and Nankaku, Yoshihiko and Toda, Tomoki and Zen, Heiga and Yamagishi, Junichi and Oura, Keiichiro},
  volume = {101},
  title = {Speech Synthesis Based on Hidden Markov Models},
  abstract = {This paper gives a general overview of hidden Markov model (HMM)-based speech synthesis, which has recently been demonstrated to be very effective in synthesizing speech. The main advantage of this approach is its flexibility in changing speaker identities, emotions, and speaking styles. This paper also discusses the relation between the HMM-based approach and the more conventional unit-selection approach that has dominated over the last decades. Finally, advanced techniques for future developments are described.},
  number = {6},
  month = {June},
  note = {(in press)},
  year = {2013},
  journal = {Proceedings of the IEEE}
}
@inproceedings{hartswood:13,
  author = {Hartswood, Mark and Wolters, Maria and Ure, Jenny and Anderson, Stuart and Jirotka, Marina},
  title = {Socio-material design for computer mediated social sensemaking},
  abstract = {Telemonitoring healthcare solutions often struggle to provide the hoped for efficiency improvements in managing chronic illness because of the difficulty interpreting sensor data remotely. Computer-Mediated Social Sensemaking (CMSS) is an approach to solving this problem that leverages the patient's social network to supply the missing contextual detail so that remote doctors can make more accurate decisions. However, implementing CMSS solutions is difficult because users need to know who can see which information, and whether private and confidential information is adequately protected. In this paper, we wish to explore how socio-material design solutions might offer ways of making properties of a CMSS solution tangible to participants so that they can control and understand the implications of their participation.},
  month = {April},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Hartswood-et-al_Socio-Material-Design-for-Computer-Mediated-Social-Sensemaking.pdf},
  booktitle = {Proc. CHI Workshop on Explorations in Social Interaction Design},
  categories = {social media; eHealth; confidentiality; chronic illness; social sense making; tele care; tele health; privacy}
}
@inproceedings{ting_embc13,
  author = {Ting, Chee-Ming and King, Simon and Salleh, Sh-Hussain and Ariff, A. K.},
  title = {Discriminative Tandem Features for {HMM}-based {EEG} Classification},
  abstract = {We investigate the use of discriminative feature extractors in tandem configuration with generative EEG classification system. Existing studies on dynamic EEG classification typically use hidden Markov models (HMMs) which lack discriminative capability. In this paper, a linear and a non-linear classifier are discriminatively trained to produce complementary input features to the conventional HMM system. Two sets of tandem features are derived from linear discriminant analysis (LDA) projection output and multilayer perceptron (MLP) class-posterior probability, before appended to the standard autoregressive (AR) features. Evaluation on a two-class motor-imagery classification task shows that both the proposed tandem features yield consistent gains over the AR baseline, resulting in significant relative improvement of 6.2% and 11.2% for the LDA and MLP features respectively. We also explore portability of these features across different subjects.},
  year = {2013},
  month = {July},
  address = {Osaka, Japan},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/EBMC-2013-Tandem-Features.pdf},
  booktitle = {Proc. 35th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC 13)}
}
@inproceedings{sinclair_ICASSP13,
  author = {Sinclair, Mark and King, Simon},
  title = {Where are the challenges in speaker diarization?},
  abstract = {We present a study on the contributions to Diarization Error Rate by the various components of a speaker diarization system. Following on from an earlier study by Huijbregts and Wooters, we extend into more areas and draw somewhat different conclusions. From a series of experiments combining real, oracle and ideal system components, we are able to conclude that the primary cause of error in diarization is the training of speaker models on impure data, something that is in fact done in every current system. We conclude by suggesting ways to improve future systems, including a focus on training the speaker models from smaller quantities of pure data instead of all the data, as is currently done.},
  year = {2013},
  month = {May},
  address = {Vancouver, Canada},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/3512.pdf},
  booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on},
  categories = {speaker diarization, diarization error rate}
}
@inproceedings{rasipuram13_gaelic_graphemes,
  author = {Rasipuram, Ramya and Bell, Peter and Magimai.-Doss, Mathew},
  title = {Grapheme and multilingual posterior features for under-resourced speech recognition: a study on {S}cottish {G}aelic},
  abstract = {Standard automatic speech recognition (ASR) systems use phonemes as subword units. Thus, one of the primary resources required to build a good ASR system is a well-developed phoneme pronunciation lexicon. However, under-resourced languages typically lack such lexical resources. In this paper, we investigate recently proposed grapheme-based ASR in the framework of Kullback-Leibler divergence based hidden Markov model (KL-HMM) for under-resourced languages, particularly Scottish Gaelic which has no lexical resources. More specifically, we study the use of grapheme and multilingual phoneme class conditional probabilities (posterior features) as feature observations in the KL-HMM. ASR studies conducted show that the proposed approach yields a better system compared to the conventional HMM/GMM approach using cepstral features. Furthermore, grapheme posterior features estimated using both auxiliary data and Gaelic data yield the best system.},
  year = {2013},
  month = {May},
  address = {Vancouver, Canada},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/gaelic_graphemes_icassp13.pdf},
  booktitle = {Proc. ICASSP}
}
@inproceedings{bell13_mlan,
  author = {Bell, Peter and Swietojanski, Pawel and Renals, Steve},
  doi = {10.1109/ICASSP.2013.6639014},
  title = {Multi-level adaptive networks in tandem and hybrid {ASR} systems},
  abstract = {In this paper we investigate the use of Multi-level adaptive networks (MLAN) to incorporate out-of-domain data when training large vocabulary speech recognition systems. In a set of experiments on multi-genre broadcast data and on TED lecture recordings we present results on the use of out-of-domain features in a hybrid DNN system and explore tandem systems using a variety of input acoustic features. Our experiments indicate that using the MLAN approach in both hybrid and tandem systems results in consistent reductions in word error rate of 5--10\% relative.},
  address = {Vancouver, Canada},
  month = {May},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/mlan_icassp2013.pdf},
  booktitle = {Proc. ICASSP}
}
@article{6289354,
  author = {Ling, Z. and Richmond, K. and Yamagishi, J.},
  doi = {10.1109/TASL.2012.2215600},
  title = {Articulatory Control of {HMM}-based Parametric Speech Synthesis using Feature-Space-Switched Multiple Regression},
  journal = {IEEE Transactions on Audio, Speech, and Language Processing},
  issn = {1558-7916},
  number = {1},
  abstract = {In previous work we proposed a method to control the characteristics of synthetic speech flexibly by integrating articulatory features into a hidden Markov model (HMM) based parametric speech synthesiser. In this method, a unified acoustic-articulatory model is trained, and context-dependent linear transforms are used to model the dependency between the two feature streams. In this paper, we go significantly further and propose a feature-space-switched multiple regression HMM to improve the performance of articulatory control. A multiple regression HMM (MRHMM) is adopted to model the distribution of acoustic features, with articulatory features used as exogenous explanatory variables. A separate Gaussian mixture model (GMM) is introduced to model the articulatory space, and articulatory-to-acoustic regression matrices are trained for each component of this GMM, instead of for the context-dependent states in the HMM. Furthermore, we propose a task-specific context feature tailoring method to ensure compatibility between state context features and articulatory features that are manipulated at synthesis time. The proposed method is evaluated on two tasks, using a speech database with acoustic waveforms and articulatory movements recorded in parallel by electromagnetic articulography (EMA). In a vowel identity modification task, the new method achieves better performance when reconstructing target vowels by varying articulatory inputs than our previous approach. A second vowel creation task shows our new method is highly effective at producing a new vowel from appropriate articulatory representations which, even though no acoustic samples for this vowel are present in the training data, is shown to sound highly natural.},
  volume = {21},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/06289354.pdf},
  pages = {207--219}
}
@article{Dines2011,
  author = {Dines, John and Liang, Hui and Saheer, Lakshmi and Gibson, Matthew and Byrne, William and Oura, Keiichiro and Tokuda, Keiichi and Yamagishi, Junichi and King, Simon and Wester, Mirjam and Hirsimäki, Teemu and Karhila, Reima and Kurimo, Mikko},
  doi = {10.1016/j.csl.2011.08.003},
  title = {Personalising speech-to-speech translation: Unsupervised cross-lingual speaker adaptation for {HMM}-based speech synthesis},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230811000441},
  journal = {Computer Speech and Language},
  issn = {0885-2308},
  number = {2},
  abstract = {In this paper we present results of unsupervised cross-lingual speaker adaptation applied to text-to-speech synthesis. The application of our research is the personalisation of speech-to-speech translation in which we employ a HMM statistical framework for both speech recognition and synthesis. This framework provides a logical mechanism to adapt synthesised speech output to the voice of the user by way of speech recognition. In this work we present results of several different unsupervised and cross-lingual adaptation approaches as well as an end-to-end speaker adaptive speech-to-speech translation system. Our experiments show that we can successfully apply speaker adaptation in both unsupervised and cross-lingual scenarios and our proposed algorithms seem to generalise well for several language pairs. We also discuss important future directions including the need for better evaluation metrics.},
  month = {February},
  volume = {27},
  year = {2013},
  keywords = {Speech-to-speech translation, Cross-lingual speaker adaptation, HMM-based speech synthesis, Speaker adaptation, Voice conversion},
  pages = {420--437}
}
@inproceedings{CalzadaClark2013,
  author = {Defez, Àngel Calzada and Carrié, Joan Claudi Socoró and Clark, Robert},
  title = {Parametric model for vocal effort interpolation with Harmonics Plus Noise Models},
  booktitle = {Proc. 8th ISCA Speech Synthesis Workshop},
  abstract = {It is known that voice quality plays an important role in expressive speech. In this paper, we present a methodology for modifying vocal effort level, which can be applied by text-to-speech (TTS) systems to provide the flexibility needed to improve the naturalness of synthesized speech. This extends previous work using low order Linear Prediction Coefficients (LPC) where the flexibility was constrained by the amount of vocal effort levels available in the corpora. The proposed methodology overcomes these limitations by replacing the low order LPC by ninth order polynomials to allow not only vocal effort to be modified towards the available templates, but also to allow the generation of intermediate vocal effort levels between levels available in training data. This flexibility comes from the combination of Harmonics plus Noise Models and using a parametric model to represent the spectral envelope. The conducted perceptual tests demonstrate the effectiveness of the proposed technique in performing vocal effort interpolations while maintaining the signal quality in the final synthesis. The proposed technique can be used in unit-selection TTS systems to reduce corpus size while increasing its flexibility, and the techniques could potentially be employed by HMM based speech synthesis systems if appropriate acoustic features are being used.},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS1-1_Calzada_Defez.pdf},
  pages = {25--30},
  categories = {vocal effort interpolation, harmonics plus noise model, expressive speech synthesis}
}
@inproceedings{Valentini-Botinhao_SSW8,
  author = {Valentini-Botinhao, Cassia and Wester, Mirjam and Yamagishi, Junichi and King, Simon},
  title = {Using neighbourhood density and selective {SNR} boosting to increase the intelligibility of synthetic speech in noise},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {Motivated by the fact that words are not equally confusable, we explore the idea of using word-level intelligibility predictions to selectively boost the harder-to-understand words in a sentence, aiming to improve overall intelligibility in the presence of noise. First, the intelligibility of a set of words from dense and sparse phonetic neighbourhoods was evaluated in isolation. The resulting intelligibility scores were used to inform two sentence-level experiments. In the first experiment the signal-to-noise ratio of one word was boosted to the detriment of another word. Sentence intelligibility did not generally improve. The intelligibility of words in isolation and in a sentence were found to be significantly different, both in clean and in noisy conditions. For the second experiment, one word was selectively boosted while slightly attenuating all other words in the sentence. This strategy was successful for words that were poorly recognised in that particular context. However, a reliable predictor of word-in-context intelligibility remains elusive, since this involves – as our results indicate – semantic, syntactic and acoustic information about the word and the sentence.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Cassia_SSW13.pdf},
  pages = {133--138}
}
@inproceedings{Merritt_SSW8,
  author = {Merritt, Thomas and King, Simon},
  title = {Investigating the shortcomings of {HMM} synthesis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {This paper presents the beginnings of a framework for formal testing of the causes of the current limited quality of HMM (Hidden Markov Model) speech synthesis. This framework separates each of the effects of modelling to observe their independent effects on vocoded speech parameters in order to address the issues that are restricting the progression to highly intelligible and natural-sounding speech synthesis. The simulated HMM synthesis conditions are performed on spectral speech parameters and tested via a pairwise listening test, asking listeners to perform a “same or different” judgement on the quality of the synthesised speech produced between these conditions. These responses are then processed using multidimensional scaling to identify the qualities in modelled speech that listeners are attending to and thus forms the basis of why they are distinguishable from natural speech. The future improvements to be made to the framework will finally be discussed which include the extension to more of the parameters modelled during speech synthesis.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS2-4_Merritt.pdf},
  pages = {185--190},
  categories = {speech synthesis, hidden markov models, vocoding}
}
@article{Creer20131178,
  author = {Creer, Sarah and Cunningham, Stuart and Green, Phil and Yamagishi, Junichi},
  note = {Special Issue on Speech and Language Processing for Assistive Technology},
  doi = {http://dx.doi.org/10.1016/j.csl.2012.10.001},
  title = {Building personalised synthetic voices for individuals with severe speech impairment},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230812000836},
  journal = {Computer Speech & Language},
  issn = {0885-2308},
  number = {6},
  volume = {27},
  year = {2013},
  keywords = {Voice output communication aid},
  pages = {1178--1193}
}
@inproceedings{EURECOM+4018,
  author = {Evans, Nicholas W D and Kinnunen, Tomi and Yamagishi, Junichi},
  title = {Spoofing and countermeasures for automatic speaker verification},
  booktitle = {{Interspeech} 2013, 14th {A}nnual {C}onference of the {I}nternational {S}peech {C}ommunication {A}ssociation, {A}ugust 25-29, 2013, {L}yon, {F}rance},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/mm-publi-4018.pdf}
}
@inproceedings{Astrinaki_SSW8,
  author = {Astrinaki, Maria and Moinet, Alexis and Yamagishi, Junichi and Richmond, Korin and Ling, Zhen-Hua and King, Simon and Dutoit, Thierry},
  title = {Mage - Reactive articulatory feature control of {HMM}-based parametric speech synthesis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_OS5-1_Astrinaki.pdf},
  pages = {227--231}
}
@inproceedings{lai2013summarize,
  author = {Lai, Catherine and Carletta, Jean and Renals, Steve},
  title = {Detecting Summarization Hot Spots in Meetings Using Group Level Involvement and Turn-Taking Features},
  booktitle = {Proc. Interspeech 2013, Lyon, France},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/laic2013summarization.pdf},
  abstract = {In this paper we investigate how participant involvement and turn-taking features relate to extractive summarization of meeting dialogues. In particular, we examine whether automatically derived measures of group level involvement, like participation equality and turn-taking freedom, can help detect where summarization relevant meeting segments will be. Results show that classification using turn-taking features performed better than the majority class baseline for data from both AMI and ICSI meeting corpora in identifying whether meeting segments contain extractive summary dialogue acts. The feature based approach also provided better recall than using manual ICSI involvement hot spot annotations. Turn-taking features were additionally found to be predictive of the amount of extractive summary content in a segment. In general, we find that summary content decreases with higher participation equality and overlap, while it increases with the number of very short utterances. Differences in results between the AMI and ICSI data sets suggest how group participatory structure can be used to understand what makes meetings easy or difficult to summarize.},
  categories = {summarization, turn-taking, involvement, social signals}
}
@inproceedings{lai2013affect,
  author = {Lai, Catherine and Carletta, Jean and Renals, Steve},
  title = {Modelling Participant Affect in Meetings with Turn-Taking Features},
  booktitle = {Proceedings of WASSS 2013, Grenoble, France},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/laic2013affect.pdf},
  abstract = {This paper explores the relationship between turn-taking and meeting affect. To investigate this, we model post-meeting ratings of satisfaction, cohesion and leadership from participants of AMI corpus meetings using group and individual turn-taking features. The results indicate that participants gave higher satisfaction and cohesiveness ratings to meetings with greater group turn-taking freedom and individual very short utterance rates, while lower ratings were associated with more silence and speaker overlap. Besides broad applicability to satisfaction ratings, turn-taking freedom was found to be a better predictor than equality of speaking time when considering whether participants felt that everyone had a chance to contribute. If we include dialogue act information, we see that substantive feedback type turns like assessments are more predictive of meeting affect than information giving acts or backchannels. This work highlights the importance of feedback turns and modelling group level activity in multiparty dialogue for understanding the social aspects of speech.},
  categories = {turn-taking, meetings, affect, involvement, social signals}
}
@inproceedings{laiEtAl2012rhythm,
  author = {Lai, Catherine and Evanini, Keelan and Zechner, Klaus},
  title = {Applying Rhythm Metrics to Non-native Spontaneous Speech},
  booktitle = {Proceedings of SLaTE 2013, Grenoble, France},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/laiEtAl2012rhythm.pdf},
  abstract = {This study investigates a variety of rhythm metrics on two corpora of non-native spontaneous speech and compares the non-native distributions to values from a corpus of native speech. Several of the metrics are shown to differentiate well between native and non-native speakers and to also have moderate correlations with English proficiency scores that were assigned to the non-native speech. The metric that had the highest correlation with English proficiency scores (apart from speaking rate) was rPVIsyl (the raw Pairwise Variability Index for syllables), with r = 0.43.},
  categories = {L2 speech, pronunciation scoring, rhythm}
}
@inproceedings{Swietojanski:ASRU13,
  author = {Swietojanski, P. and Ghoshal, A. and Renals, S.},
  doi = {10.1109/ASRU.2013.6707744},
  title = {Hybrid acoustic models for distant and multichannel large vocabulary speech recognition},
  abstract = {We investigate the application of deep neural network (DNN)-hidden Markov model (HMM) hybrid acoustic models for far-field speech recognition of meetings recorded using microphone arrays. We show that the hybrid models achieve significantly better accuracy than conventional systems based on Gaussian mixture models (GMMs). We observe up to 8% absolute word error rate (WER) reduction from a discriminatively trained GMM baseline when using a single distant microphone, and between 4–6% absolute WER reduction when using beamforming on various combinations of array channels. By training the networks on audio from multiple channels, we find the networks can recover a significant part of the accuracy difference between the single distant microphone and beamformed configurations. Finally, we show that the accuracy of a network recognising speech from a single distant microphone can approach that of a multi-microphone setup by training with data from other microphones.},
  month = {December},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Swietojanski_ASRU2013.pdf},
  booktitle = {Proc. IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU)},
  categories = {Distant Speech Recognition, Deep Neural Networks, Microphone Arrays, Beamforming, Meeting recognition}
}
@inproceedings{Hu_SSW8,
  author = {Hu, Qiong and Richmond, Korin and Yamagishi, Junichi and Latorre, Javier},
  title = {An experimental comparison of multiple vocoder types},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {This paper presents an experimental comparison of a broad range of the leading vocoder types which have been previously described. We use a reference implementation of each of these to create stimuli for a listening test using copy synthesis. The listening test is performed using both Lombard and normal read speech stimuli, and with two types of question for comparison. Multi-dimensional Scaling (MDS) is conducted on the listener responses to analyse similarities in terms of quality between the vocoders. Our MDS and clustering results show that the vocoders which use a sinusoidal synthesis approach are perceptually distinguishable from the source-filter vocoders. To help further interpret the axes of the resulting MDS space, we test for correlations with standard acoustic quality metrics and find one axis is strongly correlated with PESQ scores. We also find both speech style and the format of the listening test question may influence test results. Finally, we also present preference test results which compare each vocoder with the natural speech.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_OS4-3_Hu.pdf},
  pages = {155--160}
}
@inproceedings{bell13_lecture_transcription,
  author = {Bell, Peter and Yamamoto, Hitoshi and Swietojanski, Pawel and Wu, Youzheng and McInnes, Fergus and Hori, Chiori and Renals, Steve},
  title = {A lecture transcription system combining neural network acoustic and language models},
  booktitle = {Proc. Interspeech},
  address = {Lyon, France},
  month = {August},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/lecture_transcription_is2013.pdf},
  abstract = {This paper presents a new system for automatic transcription of lectures. The system combines a number of novel features, including deep neural network acoustic models using multi-level adaptive networks to incorporate out-of-domain information, and factored recurrent neural network language models. We demonstrate that the system achieves large improvements on the TED lecture transcription task from the 2012 IWSLT evaluation -- our results are currently the best reported on this task, showing a relative WER reduction of more than 16\% compared to the closest competing system from the evaluation.}
}
@inproceedings{stan13_lightly_supervised_discriminative,
  author = {Stan, Adriana and Bell, Peter and Yamagishi, Junichi and King, Simon},
  title = {Lightly Supervised Discriminative Training of Grapheme Models for Improved Sentence-level Alignment of Speech and Text Data},
  booktitle = {Proc. Interspeech},
  address = {Lyon, France},
  month = {August},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/lightly_supervised_discriminative_is2013.pdf},
  abstract = {This paper introduces a method for lightly supervised discriminative training using MMI to improve the alignment of speech and text data for use in training HMM-based TTS systems for low-resource languages. In TTS applications, due to the use of long-span contexts, it is important to select training utterances which have wholly correct transcriptions. In a low-resource setting, when using poorly trained grapheme models, we show that the use of MMI discriminative training at the grapheme-level enables us to increase the amount of correctly aligned data by 40\%, while maintaining a 7\% sentence error rate and 0.8\% word error rate. We present the procedure for lightly supervised discriminative training with regard to the objective of minimising sentence error rate.}
}
@inproceedings{christensen13_disordered,
  author = {Christensen, H. and Aniol, M. and Bell, P. and Green, P. and Hain, T. and King, S. and Swietojanski, P.},
  title = {Combining in-domain and out-of-domain speech data for automatic recognition of disordered speech},
  booktitle = {Proc. Interspeech},
  address = {Lyon, France},
  month = {August},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/christensen_is13_2_final.pdf},
  abstract = {Recently there has been increasing interest in ways of using out-of-domain (OOD) data to improve automatic speech recognition performance in domains where only limited data is available. This paper focuses on one such domain, namely that of disordered speech for which only very small databases exist, but where normal speech can be considered OOD. Standard approaches for handling small data domains use adaptation from OOD models into the target domain, but here we investigate an alternative approach with its focus on the feature extraction stage: OOD data is used to train feature-generating deep belief neural networks. Using AMI meeting and TED talk datasets, we investigate various tandem-based speaker independent systems as well as maximum a posteriori adapted speaker dependent systems. Results on the UAspeech isolated word task of disordered speech are very promising with our overall best system (using a combination of AMI and TED data) giving a correctness of 62.5\%; an increase of 15\% on the previously best published results based on conventional model adaptation. We show that the relative benefit of using OOD data varies considerably from speaker to speaker and is only loosely correlated with the severity of a speaker's impairments.}
}
@inproceedings{lanchantin13_multigenre_transcription,
  author = {Lanchantin, P. and Bell, P. and Gales, M. and Hain, T. and Liu, X. and Long, Y. and Quinnell, J. and Renals, S. and Saz, O. and Seigel, M. and Swietojanski, P. and Woodland, P.},
  title = {Automatic Transcription of Multi-genre Media Archives},
  booktitle = {Proc. Workshop on Speech, Language and Audio in Multimedia},
  year = {2013},
  address = {Marseille, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/lanchantin13_multigenre_transcription.pdf},
  abstract = {This paper describes some recent results of our collaborative work on developing a speech recognition system for the automatic transcription of media archives from the British Broadcasting Corporation (BBC). The material includes a high diversity of shows with their associated transcriptions, which are highly diverse in terms of completeness, reliability and accuracy. First, we investigate how to improve lightly supervised acoustic training when time-stamp information is inaccurate or when speech deviates significantly from the transcription. To address the latter issue, word and segment level combination approaches are used between the lightly supervised transcripts and the original programme scripts, which yields improved transcriptions. Experimental results show that systems trained using these improved transcriptions consistently outperform those trained using only the original lightly supervised decoding hypotheses. Secondly, we show that the recognition task may benefit from systems trained on a combination of in-domain and out-of-domain data. Working with tandem HMMs, we present Multi-level Adaptive Networks, a novel technique for incorporating information from out-of-domain posterior features using deep neural networks. We show that it provides a substantial reduction in WER over other systems, including a PLP baseline, in-domain tandem features and the best out-of-domain tandem features.}
}
@inproceedings{Braude2013a,
  author = {Braude, David Adam and Shimodaira, Hiroshi and Ben Youssef, Atef},
  title = {Template-Warping Based Speech Driven Head Motion Synthesis},
  booktitle = {Interspeech},
  abstract = {We propose a method for synthesising head motion from speech using a combination of an Input-Output Markov model (IOMM) and Gaussian mixture models trained in a supervised manner. A key difference of this approach compared to others is that it models the head motion in each angle as a series of templates of motion rather than trying to recover a frame-wise function. The templates were chosen to reflect natural patterns in the head motion, and states for the IOMM were chosen based on statistics of the templates. This reduces the search space for the trajectories and prevents impossible motions such as discontinuities. For synthesis our system warps the templates to account for the acoustic features and the other angles' warping parameters. We show our system is capable of recovering the statistics of the motion that were chosen for the states. Our system was then compared to a baseline that used a frame-wise mapping based on previously published work. A subjective preference test that includes multiple speakers showed participants have a preference for the segment-based approach. Both of these systems were trained on storytelling free speech.},
  year = {2013},
  keywords = {Head motion synthesis, GMMs, IOMM},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Braude-etal_IS13.pdf},
  pages = {2763--2767}
}
@inproceedings{jdriesen_asru13,
  author = {Driesen, Joris and Renals, Steve},
  doi = {10.1109/ASRU.2013.6707772},
  title = {Lightly Supervised Automatic Subtitling of Weather Forecasts},
  booktitle = {Proc. Automatic Speech Recognition and Understanding Workshop},
  address = {Olomouc, Czech Republic},
  month = {December},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/asru13.pdf},
  abstract = {Since subtitling television content is a costly process, there are large potential advantages to automating it, using automatic speech recognition (ASR). However, training the necessary acoustic models can be a challenge, since the available training data usually lacks verbatim orthographic transcriptions. If there are approximate transcriptions, this problem can be overcome using light supervision methods. In this paper, we perform speech recognition on broadcasts of Weatherview, BBC's daily weather report, as a first step towards automatic subtitling. For training, we use a large set of past broadcasts, using their manually created subtitles as approximate transcriptions. We discuss and compare two different light supervision methods, applying them to this data. The best training set finally obtained with these methods is used to create a hybrid deep neural network-based recognition system, which yields high recognition accuracies on three separate Weatherview evaluation sets.}
}
@inproceedings{Yanagisawa_SSW8,
  author = {Yanagisawa, Kayoko and Latorre, Javier and Wan, Vincent and Gales, Mark J. F. and King, Simon},
  title = {Noise Robustness in {HMM-TTS} Speaker Adaptation},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {Speaker adaptation for TTS applications has been receiving more attention in recent years for applications such as voice customisation or voice banking. If these applications are offered as an Internet service, there is no control on the quality of the data that can be collected. It can be noisy with people talking in the background or recorded in a reverberant environment. This makes the adaptation more difficult. This paper explores the effect of different levels of additive and convolutional noise on speaker adaptation techniques based on cluster adaptive training (CAT) and average voice model (AVM). The results indicate that although both techniques suffer degradation to some extent, CAT is in general more robust than AVM.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_OS3-3_Yanagisawa.pdf},
  pages = {139--144}
}
@article{Tejedor2013,
  author = {Tejedor, Javier and Toledano, Doroteo T. and Wang, Dong and King, Simon and Colas, Jose},
  title = {Feature analysis for discriminative confidence estimation in Spoken Term Detection},
  journal = {Computer Speech and Language},
  number = {},
  abstract = {Discriminative confidence based on multi-layer perceptrons (MLPs) and multiple features has shown significant advantage compared to the widely used lattice-based confidence in spoken term detection (STD). Although the MLP-based framework can handle any features derived from a multitude of sources, choosing all possible features may lead to over complex models and hence less generality. In this paper, we design an extensive set of features and analyze their contribution to STD individually and as a group. The main goal is to choose a small set of features that are sufficiently informative while keeping the model simple and generalizable. We employ two established models to conduct the analysis: one is linear regression which targets for the most relevant features and the other is logistic linear regression which targets for the most discriminative features. We find the most informative features are comprised of those derived from diverse sources (ASR decoding, duration and lexical properties) and the two models deliver highly consistent feature ranks. STD experiments on both English and Spanish data demonstrate significant performance gains with the proposed feature sets.},
  volume = {To appear},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Tejedor_CSL2013.pdf},
  pages = {}
}
@inproceedings{San-Segundo_SSW8,
  author = {San-Segundo, Rubén and Montero, Juan Manuel and Giurgiu, Mircea and Muresan, Ioana and King, Simon},
  title = {Multilingual Number Transcription for Text-to-Speech Conversion},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {This paper describes the text normalization module of a fully-trainable text-to-speech conversion system and its application to number transcription. The main target is to generate a language-independent text normalization module, based on data instead of on expert rules. This paper proposes a general architecture based on statistical machine translation techniques. This proposal is composed of three main modules: a tokenizer for splitting the text input into a token graph, a phrase-based translation module for token translation, and a post-processing module for removing some tokens. This architecture has been evaluated for number transcription in several languages: English, Spanish and Romanian. Number transcription is an important aspect in the text normalization problem.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS1-8_San-Segundo.pdf},
  pages = {85--89}
}
@inproceedings{Lu_SSW8,
  author = {Lu, Heng and King, Simon and Watts, Oliver},
  title = {Combining a Vector Space Representation of Linguistic Context with a Deep Neural Network for Text-To-Speech Synthesis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {Conventional statistical parametric speech synthesis relies on decision trees to cluster together similar contexts, resulting in tied-parameter context-dependent hidden Markov models (HMMs). However, decision tree clustering has a major weakness: it uses hard division and subdivides the model space based on one feature at a time, fragmenting the data and failing to exploit interactions between linguistic context features. These linguistic features themselves are also problematic, being noisy and of varied relevance to the acoustics. We propose to combine our previous work on vector-space representations of linguistic context, which have the added advantage of working directly from textual input, and Deep Neural Networks (DNNs), which can directly accept such continuous representations as input. The outputs of the network are probability distributions over speech features. Maximum Likelihood Parameter Generation is then used to create parameter trajectories, which in turn drive a vocoder to generate the waveform. Various configurations of the system are compared, using both conventional and vector space context representations and with the DNN making speech parameter predictions at two different temporal resolutions: frames, or states. Both objective and subjective results are presented.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS3-3_Lu.pdf},
  pages = {281--285}
}
@article{6578128,
  author = {Lal, P. and King, S.},
  doi = {10.1109/TASL.2013.2277932},
  title = {Cross-lingual Automatic Speech Recognition using Tandem Features},
  journal = {IEEE Transactions on Audio, Speech, and Language Processing},
  issn = {1558-7916},
  number = {},
  abstract = {Automatic speech recognition depends on large amounts of transcribed speech recordings in order to estimate the parameters of the acoustic model. Recording such large speech corpora is time-consuming and expensive; as a result, sufficient quantities of data exist only for a handful of languages — there are many more languages for which little or no data exist. Given that there are acoustic similarities between speech in different languages, it may be fruitful to use data from a well-resourced source language to estimate the acoustic models for a recogniser in a poorly-resourced target language. Previous approaches to this task have often involved making assumptions about shared phonetic inventories between the languages. Unfortunately pairs of languages do not generally share a common phonetic inventory. We propose an indirect way of transferring information from a source language acoustic model to a target language acoustic model without having to make any assumptions about the phonetic inventory overlap. To do this, we employ tandem features, in which class-posteriors from a separate classifier are decorrelated and appended to conventional acoustic features. Tandem features have the advantage that the language of the speech data used to train the classifier need not be the same as the target language to be recognised. This is because the class-posteriors are not used directly, so do not have to be over any particular set of classes. We demonstrate the use of tandem features in cross-lingual settings, including training on one or several source languages. We also examine factors which may predict a priori how much relative improvement will be brought about by using such tandem features, for a given source and target pair. In addition to conventional phoneme class-posteriors, we also investigate whether articulatory features (AFs) - a multistream, discrete, multi-valued labelling of speech — can be used instead. This is motivated by an assumption that AFs are less language-specific than a phoneme set.},
  volume = {To appear},
  year = {2013},
  keywords = {Acoustics;Data models;Hidden Markov models;Speech;Speech recognition;Training;Transforms;Automatic speech recognition;Multilayer perceptrons},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Lal_TASLP2013.pdf},
  pages = {}
}
@inproceedings{bourlard_slam2013,
  author = {Bourlard, H. and Ferras, M. and Pappas, N. and Popescu-Belis, A. and Renals, S. and McInnes, F. and Bell, P. and Ingram, S. and Guillemot, M.},
  title = {Processing and Linking Audio Events in Large Multimedia Archives: The {EU} {inEvent} Project},
  booktitle = {Proceedings of SLAM 2013 (First Workshop on Speech, Language and Audio in Multimedia)},
  year = {2013},
  month = {August},
  address = {Marseille, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/bourlard_slam2013.pdf},
  abstract = {In the inEvent EU project, we aim at structuring, retrieving, and sharing large archives of networked, and dynamically changing, multimedia recordings, mainly consisting of meetings, videoconferences, and lectures. More specifically, we are developing an integrated system that performs audiovisual processing of multimedia recordings, and labels them in terms of interconnected "hyper-events" (a notion inspired by hyper-texts). Each hyper-event is composed of simpler facets, including audio-video recordings and metadata, which are then easier to search, retrieve and share. In the present paper, we mainly cover the audio processing aspects of the system, including speech recognition, speaker diarization and linking (across recordings), the use of these features for hyper-event indexing and recommendation, and the search portal. We present initial results for feature extraction from lecture recordings using the TED talks.},
  categories = {networked multimedia events, audio processing: speech recognition, speaker diarization and linking, multimedia indexing and searching, hyper-events}
}
@inproceedings{bhatt_acmmm2013,
  author = {Bhatt, C. and Popescu-Belis, A. and Habibi, M. and Ingram, S. and Masneri, S. and McInnes, F. and Pappas, N. and Schreer, O.},
  title = {Multi-factor Segmentation for Topic Visualization and Recommendation: the {MUST-VIS} System},
  booktitle = {Proceedings of ACM Multimedia 2013},
  year = {2013},
  month = {October},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/bhatt_acmmm2013.pdf},
  abstract = {This paper presents the MUST-VIS system for the MediaMixer/VideoLectures.NET Temporal Segmentation and Annotation Grand Challenge. The system allows users to visualize a lecture as a series of segments represented by keyword clouds, with relations to other similar lectures and segments. Segmentation is performed using a multi-factor algorithm which takes advantage of the audio (through automatic speech recognition and word-based segmentation) and video (through the detection of actions such as writing on the blackboard). The similarity across segments and lectures is computed using a content-based recommendation algorithm. Overall, the graph-based representation of segment similarity appears to be a promising and cost-effective approach to navigating lecture databases.},
  categories = {content analysis and retrieval, multimedia information systems, lecture segmentation, lecture recommendations}
}
@inproceedings{Mamiya_SSW8,
  author = {Mamiya, Yoshitaka and Stan, Adriana and Yamagishi, Junichi and Bell, Peter and Watts, Oliver and Clark, Robert and King, Simon},
  title = {Using Adaptation to Improve Speech Transcription Alignment in Noisy and Reverberant Environments},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {When using data retrieved from the internet to create new speech databases, the recording conditions can often be highly variable within and between sessions. This variance influences the overall performance of any automatic speech and text alignment techniques used to process this data. In this paper we discuss the use of speaker adaptation methods to address this issue. Starting from a baseline system for automatic sentence-level segmentation and speech and text alignment based on GMMs and grapheme HMMs, respectively, we employ Maximum A Posteriori (MAP) and Constrained Maximum Likelihood Linear Regression (CMLLR) techniques to model the variation in the data in order to increase the amount of confidently aligned speech. We tested 29 different scenarios, which include reverberation, 8 talker babble noise and white noise, each in various combinations and SNRs. Results show that the MAP-based segmentation's performance is very much influenced by the noise type, as well as the presence or absence of reverberation. On the other hand, the CMLLR adaptation of the acoustic models gives an average 20\% increase in the aligned data percentage for the majority of the studied scenarios.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS1-4_Mamiya.pdf},
  pages = {61--66}
}
@inproceedings{Watts_SSW8,
  author = {Watts, Oliver and Stan, Adriana and Clark, Rob and Mamiya, Yoshitaka and Giurgiu, Mircea and Yamagishi, Junichi and King, Simon},
  title = {Unsupervised and lightly-supervised learning for rapid construction of {TTS} systems in multiple languages from 'found' data: evaluation and analysis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {This paper presents techniques for building text-to-speech front-ends in a way that avoids the need for language-specific expert knowledge, but instead relies on universal resources (such as the Unicode character database) and unsupervised learning from unannotated data to ease system development. The acquisition of expert language-specific knowledge and expert annotated data is a major bottleneck in the development of corpus-based TTS systems in new languages. The methods presented here side-step the need for such resources as pronunciation lexicons, phonetic feature sets, part of speech tagged data, etc. The paper explains how the techniques introduced are applied to the 14 languages of a corpus of `found' audiobook data. Results of an evaluation of the intelligibility of the systems resulting from applying these novel techniques to this data are presented.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_OS2-3_Watts.pdf},
  pages = {121--126}
}
@inproceedings{Lorenzo-Trueba_SSW8,
  author = {Lorenzo-Trueba, Jaime and Barra-Chicote, Roberto and Yamagishi, Junichi and Watts, Oliver and Montero, Juan M.},
  title = {Towards Speaking Style Transplantation in Speech Synthesis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {One of the biggest challenges in speech synthesis is the production of natural-sounding synthetic voices. This means that the resulting voice must not only be of high enough quality but must also be able to capture the natural expressiveness imbued in human speech. This paper focuses on solving the expressiveness problem by proposing a set of different techniques that could be used for extrapolating the expressiveness of proven high-quality speaking style models into neutral speakers in HMM-based synthesis. As an additional advantage, the proposed techniques are based on adaptation approaches, which means that they can be used with little training data (around 15 minutes of training data are used for each style in this paper). For the final implementation, a set of 4 speaking styles was considered: news broadcasts, live sports commentary, interviews and parliamentary speech. Finally, the implementations of the 5 techniques were tested through a perceptual evaluation that proves that the deviations between neutral and speaking-style average models can be learned and used to imbue expressiveness into target neutral speakers as intended.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS2-3_Lorenzo-Trueba.pdf},
  pages = {179--183}
}
@inproceedings{Stan_IS13,
  author = {Stan, Adriana and Watts, Oliver and Mamiya, Yoshitaka and Giurgiu, Mircea and Clark, Rob and Yamagishi, Junichi and King, Simon},
  title = {{TUNDRA: A Multilingual Corpus of Found Data for TTS Research Created with Light Supervision}},
  booktitle = {Proc. Interspeech},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/IS131055.pdf},
  abstract = {Simple4All Tundra (version 1.0) is the first release of a standardised multilingual corpus designed for text-to-speech research with imperfect or found data. The corpus consists of approximately 60 hours of speech data from audiobooks in 14 languages, as well as utterance-level alignments obtained with a lightly-supervised process. Future versions of the corpus will include finer-grained alignment and prosodic annotation, all of which will be made freely available. This paper gives a general outline of the data collected so far, as well as a detailed description of how this has been done, emphasizing the minimal language-specific knowledge and manual intervention used to compile the corpus. To demonstrate its potential use, text-to-speech systems have been built for all languages using unsupervised or lightly supervised methods, also briefly presented in the paper.}
}
@inproceedings{blizzard_13,
  author = {Watts, Oliver and Stan, Adriana and Mamiya, Yoshitaka and Suni, Antti and Burgos, José Martín and Montero, Juan Manuel},
  title = {{The {Simple4All} entry to the Blizzard Challenge 2013}},
  booktitle = {Proc. Blizzard Challenge 2013},
  month = {August},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/S4A_blizzard_2013.pdf},
  abstract = {We describe the synthetic voices entered into the 2013 Blizzard Challenge by the SIMPLE4ALL consortium. The 2013 Blizzard Challenge presents an opportunity to test and benchmark some of the tools we have been developing to address two problems of interest: 1) how best to learn from plentiful 'found' data, and 2) how to produce systems in arbitrary new languages with minimal annotated data and language-specific expertise on the part of the system builders. We here explain how our tools were used to address these problems on the different tasks of the challenge, and provide some discussion of the evaluation results.}
}
@inproceedings{Mamiya_13a,
  author = {Mamiya, Yoshitaka and Yamagishi, Junichi and Watts, Oliver and Clark, Robert A.J. and King, Simon and Stan, Adriana},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/0007987.pdf},
  booktitle = {Proc. ICASSP},
  year = {2013},
  abstract = {Audiobooks have attracted attention as promising data for training Text-to-Speech (TTS) systems. However, they usually do not have a correspondence between audio and text data. Moreover, they are usually divided only into chapter units. In practice, we have to make a correspondence of audio and text data before we use them for building TTS synthesisers. However, aligning audio and text data is time-consuming and involves manual labor. It also requires persons skilled in speech processing. Previously, we have proposed to use graphemes for automatically aligning speech and text data. This paper further integrates a lightly supervised voice activity detection (VAD) technique to detect sentence boundaries as a pre-processing step before the grapheme approach. This lightly supervised technique requires time stamps of speech and silence only for the first fifty sentences. Combining these, we can semi-automatically build TTS systems from audiobooks with minimum manual intervention. From subjective evaluations we analyse how the grapheme-based aligner and/or the proposed VAD technique impact the quality of HMM-based speech synthesisers trained on audiobooks.},
  title = {Lightly Supervised {GMM} {VAD} to Use Audiobook for Speech Synthesiser}
}
@inproceedings{lu2013_nat,
  author = {Lu, Liang and Ghoshal, Arnab and Renals, Steve},
  title = {Noise adaptive training for subspace {Gaussian} mixture models},
  abstract = {Noise adaptive training (NAT) is an effective approach to normalise environmental distortions when training a speech recogniser on noise-corrupted speech. This paper investigates the model-based NAT scheme using joint uncertainty decoding (JUD) for subspace Gaussian mixture models (SGMMs). A typical SGMM acoustic model has a much larger number of surface Gaussian components, which makes it computationally infeasible to compensate each Gaussian explicitly. JUD tackles this problem by sharing the compensation parameters among the Gaussians and hence reduces the computational and memory demands. For noise adaptive training, JUD is reformulated into a generative model, which leads to an efficient expectation-maximisation (EM) based algorithm to update the SGMM acoustic model parameters. We evaluated the SGMMs with NAT on the Aurora 4 database, and obtained higher recognition accuracy compared to systems without adaptive training.},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/lu2013noise.pdf},
  booktitle = {Proc. Interspeech},
  categories = {adaptive training, noise robustness, joint uncertainty decoding, subspace Gaussian mixture models}
}
@inproceedings{lu2013_pronunciation,
  author = {Lu, Liang and Ghoshal, Arnab and Renals, Steve},
  doi = {10.1109/ASRU.2013.6707759},
  title = {Acoustic Data-driven Pronunciation Lexicon for Large Vocabulary Speech Recognition},
  abstract = {Speech recognition systems normally use handcrafted pronunciation lexicons designed by linguistic experts. Building and maintaining such a lexicon is expensive and time consuming. This paper concerns automatically learning a pronunciation lexicon for speech recognition. We assume the availability of a small seed lexicon and then learn the pronunciations of new words directly from speech that is transcribed at word-level. We present two implementations for refining the putative pronunciations of new words based on acoustic evidence. The first one is an expectation maximization (EM) algorithm based on weighted finite state transducers (WFSTs) and the other is its Viterbi approximation. We carried out experiments on the Switchboard corpus of conversational telephone speech. The expert lexicon has a size of more than 30,000 words, from which we randomly selected 5,000 words to form the seed lexicon. By using the proposed lexicon learning method, we have significantly improved the accuracy compared with a lexicon learned using a grapheme-to-phoneme transformation, and have obtained a word error rate that approaches that achieved using a fully handcrafted lexicon.},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/liang_asru13.pdf},
  booktitle = {Proc. ASRU},
  categories = {Lexical modelling, Probabilistic pronunciation model, Automatic speech recognition}
}
@phdthesis{lu2013_thesis,
  author = {Lu, Liang},
  school = {University of Edinburgh},
  title = {Subspace {Gaussian} Mixture Models for Automatic Speech Recognition},
  abstract = {In most state-of-the-art speech recognition systems, Gaussian mixture models (GMMs) are used to model the density of the emitting states in the hidden Markov models (HMMs). In a conventional system, the model parameters of each GMM are estimated directly and independently given the alignment. This results in a large number of model parameters to be estimated, and consequently, a large amount of training data is required to fit the model. In addition, different sources of acoustic variability that impact the accuracy of a recogniser, such as pronunciation variation, accent, speaker factors and environmental noise, are only weakly modelled and factorized by adaptation techniques such as maximum likelihood linear regression (MLLR), maximum a posteriori adaptation (MAP) and vocal tract length normalisation (VTLN). In this thesis, we will discuss an alternative acoustic modelling approach --- the subspace Gaussian mixture model (SGMM), which is expected to deal with these two issues better. In an SGMM, the model parameters are derived from low-dimensional model and speaker subspaces that can capture phonetic and speaker correlations. Given these subspaces, only a small number of state-dependent parameters are required to derive the corresponding GMMs. Hence, the total number of model parameters can be reduced, which allows acoustic modelling with a limited amount of training data. In addition, the SGMM-based acoustic model factorizes the phonetic and speaker factors, and within this framework, other sources of acoustic variability may also be explored. In this thesis, we propose a regularised model estimation for SGMMs, which avoids overtraining in cases where the training data is sparse. We will also take advantage of the structure of SGMMs to explore cross-lingual acoustic modelling for low-resource speech recognition. Here, the model subspace is estimated from out-of-domain data and ported to the target language system. In this case, only the state-dependent parameters need to be estimated, which relaxes the requirement on the amount of training data. To improve the robustness of SGMMs against environmental noise, we propose to apply the joint uncertainty decoding (JUD) technique, which is shown to be efficient and effective. We will report experimental results on the Wall Street Journal (WSJ) database and GlobalPhone corpora to evaluate the regularisation and cross-lingual modelling of SGMMs. Noise compensation using JUD for SGMM acoustic models is evaluated on the Aurora 4 database.},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/llu_thesis.pdf},
  categories = {subspace Gaussian mixture model, speech recognition, cross-lingual, noise robustness}
}
@inproceedings{benyoussef:IS2013,
  author = {Ben Youssef, Atef and Shimodaira, Hiroshi and Braude, David A.},
  title = {Articulatory features for speech-driven head motion synthesis},
  booktitle = {Proc. Interspeech},
  year = {2013},
  abstract = {This study investigates the use of articulatory features for speech-driven head motion synthesis as opposed to prosody features such as F0 and energy, which have mainly been used in the literature. In the proposed approach, multi-stream HMMs are trained jointly on the synchronous streams of speech and head motion data. Articulatory features can be regarded as an intermediate parametrisation of speech that is expected to have a close link with head movement. Measured head and articulatory movements acquired by EMA were synchronously recorded with speech. Measured articulatory data were compared to those predicted from speech using an HMM-based inversion mapping system trained in a semi-supervised fashion. Canonical correlation analysis (CCA) on a data set of free speech of 12 people shows that the articulatory features are more correlated with head rotation than prosodic and/or cepstral speech features. It is also shown that the synthesised head motion using articulatory features gives higher correlations with the original head motion than when only prosodic features are used.},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/BenYoussef-et-al_IS13.pdf},
  pages = {2758-2762}
}
@inproceedings{braude2013template,
  author = {Braude, David A. and Shimodaira, Hiroshi and Ben Youssef, Atef},
  title = {Template-Warping Based Speech Driven Head Motion Synthesis},
  booktitle = {Proc. Interspeech},
  year = {2013},
  abstract = {We propose a method for synthesising head motion from speech using a combination of an Input-Output Markov model (IOMM) and Gaussian mixture models trained in a supervised manner. A key difference of this approach compared to others is to model the head motion in each angle as a series of templates of motion rather than trying to recover a frame-wise function. The templates were chosen to reflect natural patterns in the head motion, and states for the IOMM were chosen based on statistics of the templates. This reduces the search space for the trajectories and prevents impossible motions such as discontinuities. For synthesis our system warps the templates to account for the acoustic features and the other angles’ warping parameters. We show our system is capable of recovering the statistics of the motion that were chosen for the states. Our system was then compared to a baseline that used a frame-wise mapping based on previously published work. A subjective preference test that includes multiple speakers showed participants have a preference for the segment-based approach. Both of these systems were trained on storytelling free speech.},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Braude-etal_IS13.pdf},
  pages = {2763-2767}
}
@inproceedings{benyoussef:iva2013,
  author = {Ben Youssef, Atef and Shimodaira, Hiroshi and Braude, David A.},
  title = {Head Motion Analysis and Synthesis over Different Tasks},
  booktitle = {Proc. Intelligent Virtual Agents},
  abstract = {It is known that subjects vary in their head movements. This paper presents an analysis of this variety over different tasks and speakers and its impact on head motion synthesis. Measured head and articulatory movements acquired by an ElectroMagnetic Articulograph (EMA), synchronously recorded with audio, were used. A data set of speech from 12 people recorded on different tasks confirms that head motion varies over tasks and speakers. Experimental results confirmed that the proposed models were capable of learning and synthesising task-dependent head motions from speech. Subjective evaluation of synthesised head motion using task models shows that models trained on the matched task are better than mismatched ones, and that free speech data provides models whose predicted motion is preferred by the participants compared to read speech data.},
  month = {September},
  year = {2013},
  organization = {Springer},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/BenYoussef-et-al_IVA13.pdf},
  pages = {285-294}
}
@inproceedings{braude:iva2013,
  author = {Braude, David A. and Shimodaira, Hiroshi and Ben Youssef, Atef},
  title = {The {University of Edinburgh} Head-Motion and Audio Storytelling ({U}o{E}-{H}A{S}) Dataset},
  booktitle = {Proc. Intelligent Virtual Agents},
  year = {2013},
  abstract = {In this paper we announce the release of a large dataset of storytelling monologue with motion capture for the head and body. Initial tests on the dataset indicate that head motion is more dependent on the speaker than the style of speech.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Braude-etal_IVA2013.pdf},
  organization = {Springer},
  pages = {466-467}
}
@inproceedings{Vesely:IS13,
  author = {Vesely, Karel and Ghoshal, Arnab and Burget, Lukáš and Povey, Daniel},
  title = {Sequence-discriminative training of deep neural networks},
  booktitle = {Proceedings of the Annual Conference of the International Speech Communication Association (Interspeech)},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  keywords = {myPubs, neural networks, discriminative training},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/is13-dnn_seq.pdf},
  abstract = {Sequence-discriminative training of deep neural networks (DNNs) is investigated on a 300 hour American English conversational telephone speech task. Different sequence-discriminative criteria --- maximum mutual information (MMI), minimum phone error (MPE), state-level minimum Bayes risk (sMBR), and boosted MMI --- are compared. Two different heuristics are investigated to improve the performance of the DNNs trained using sequence-based criteria --- lattices are re-generated after the first iteration of training; and, for MMI and BMMI, the frames where the numerator and denominator hypotheses are disjoint are removed from the gradient computation. Starting from a competitive DNN baseline trained using cross-entropy, different sequence-discriminative criteria are shown to lower word error rates by 8-9% relative, on average. Little difference is noticed between the different sequence-based criteria that are investigated. The experiments are done using the open-source Kaldi toolkit, which makes it possible for the wider community to reproduce these results.}
}
@inproceedings{godoy_mayo_stylianou_interspeech13,
  author = {Godoy, Elizabeth and Mayo, Catherine and Stylianou, Yannis},
  title = {Linking Loudness Increases in Normal and {Lombard} Speech to Decreasing Vowel Formant Separation},
  booktitle = {Proc. Interspeech},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/IS130186.PDF},
  abstract = {The increased vocal effort associated with the Lombard reflex produces speech that is perceived as louder and judged to be more intelligible in noise than normal speech. Previous work illustrates that, on average, Lombard increases in loudness result from boosting spectral energy in a frequency band spanning the range of formants F1-F3, particularly for voiced speech. Observing additionally that increases in loudness across spoken sentences are spectro-temporally localized, the goal of this work is to further isolate these regions of maximal loudness by linking them to specific formant trends, explicitly considering here the vowel formant separation. For both normal and Lombard speech, this work illustrates that, as loudness increases in frequency bands containing formants (e.g. F1-F2 or F2-F3), the observed separation between formant frequencies decreases. From a production standpoint, these results seem to highlight a physiological trait associated with how humans increase the loudness of their speech, namely moving vocal tract resonances closer together. In particular, for Lombard speech this phenomenon is exaggerated: that is, the Lombard speech is louder and the formants in corresponding spectro-temporal regions are even closer together.},
  categories = {Lombard effect, loudness, vowel formant separation}
}
@article{mayo_gibbon_clark_jslhr13,
  author = {Mayo, Catherine and Gibbon, Fiona and Clark, Robert A. J.},
  doi = {10.1044/1092-4388(2012/10-0280)},
  title = {Phonetically Trained and Untrained Adults' Transcription of Place of Articulation for Intervocalic Lingual Stops With Intermediate Acoustic Cues},
  journal = {Journal of Speech, Language and Hearing Research},
  abstract = {Purpose: In this study, the authors aimed to investigate how listener training and the presence of intermediate acoustic cues influence transcription variability for conflicting cue speech stimuli. Method: Twenty listeners with training in transcribing disordered speech, and 26 untrained listeners, were asked to make forced-choice labeling decisions for synthetic vowel–consonant–vowel (VCV) sequences "a doe" and "a go". Both the VC and CV transitions in these stimuli ranged through intermediate positions, from appropriate for /d/ to appropriate for /g/. Results: Both trained and untrained listeners gave more weight to the CV transitions than to the VC transitions. However, listener behavior was not uniform: The results showed a high level of inter- and intratranscriber inconsistency, with untrained listeners showing a nonsignificant tendency to be more influenced than trained listeners by CV transitions. Conclusions: Listeners do not assign consistent categorical labels to the type of intermediate, conflicting transitional cues that were present in the stimuli used in the current study and that are also present in disordered articulations. Although listener inconsistency in assigning labels to intermediate productions is not increased as a result of phonetic training, neither is it reduced by such training.},
  volume = {56},
  year = {2013},
  keywords = {speech perception, intermediate acoustic cues, phonetic transcription, multilevel logistic regression},
  pages = {779-791}
}
@inproceedings{richmond_IS2013,
  author = {Richmond, Korin and Ling, Zhenhua and Yamagishi, Junichi and Uría, Benigno},
  title = {On the Evaluation of Inversion Mapping Performance in the Acoustic Domain},
  abstract = {The two measures typically used to assess the performance of an inversion mapping method, where the aim is to estimate what articulator movements gave rise to a given acoustic signal, are root mean squared (RMS) error and correlation. In this paper, we investigate whether ``task-based'' evaluation using an articulatory-controllable HMM-based speech synthesis system can give useful additional information to complement these measures. To assess the usefulness of this evaluation approach, we use articulator trajectories estimated by a range of different inversion mapping methods as input to the synthesiser, and measure their performance in the acoustic domain in terms of RMS error of the generated acoustic parameters and with a listening test involving 30 participants. We then compare these results with the standard RMS error and correlation measures calculated in the articulatory domain. Interestingly, in the acoustic evaluation we observe one method performs with no statistically significant difference from measured articulatory data, and cases where statistically significant differences between methods exist which are not reflected in the results of the two standard measures. From our results, we conclude such task-based evaluation can indeed provide interesting extra information, and gives a useful way to compare inversion methods.},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  keywords = {Inversion mapping, evaluation, HMM synthesis},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/richmond_IS2013.pdf},
  booktitle = {Proc. Interspeech},
  pages = {1012--1016}
}
@inproceedings{doubletalk_IS2013,
  author = {Scobbie, James and Turk, Alice and Geng, Christian and King, Simon and Lickley, Robin and Richmond, Korin},
  title = {The {E}dinburgh Speech Production Facility {DoubleTalk} Corpus},
  abstract = {The DoubleTalk articulatory corpus was collected at the Edinburgh Speech Production Facility (ESPF) using two synchronized Carstens AG500 electromagnetic articulometers. The first release of the corpus comprises orthographic transcriptions aligned at phrasal level to EMA and audio data for each of 6 mixed-dialect speaker pairs. It is available from the ESPF online archive. A variety of tasks were used to elicit a wide range of speech styles, including monologue (a modified Comma Gets a Cure and spontaneous story-telling), structured spontaneous dialogue (Map Task and Diapix), a wordlist task, a memory-recall task, and a shadowing task. In this session we will demo the corpus with various examples.},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  keywords = {discourse, EMA, spontaneous speech},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/doubletalk_IS2013.pdf},
  booktitle = {Proc. Interspeech}
}
@article{Geng2013421,
  author = {Geng, Christian and Turk, Alice and Scobbie, James M. and Macmartin, Cedric and Hoole, Philip and Richmond, Korin and Wrench, Alan and Pouplier, Marianne and Bard, Ellen Gurman and Campbell, Ziggy and Dickie, Catherine and Dubourg, Eddie and Hardcastle, William and Kainada, Evia and King, Simon and Lickley, Robin and Nakai, Satsuki and Renals, Steve and White, Kevin and Wiegand, Ronny},
  doi = {10.1016/j.wocn.2013.07.002},
  title = {Recording speech articulation in dialogue: Evaluating a synchronized double electromagnetic articulography setup},
  url = {http://www.sciencedirect.com/science/article/pii/S0095447013000375},
  journal = {Journal of Phonetics},
  issn = {0095-4470},
  number = {6},
  abstract = {We demonstrate the workability of an experimental facility that is geared towards the acquisition of articulatory data from a variety of speech styles common in language use, by means of two synchronized electromagnetic articulography (EMA) devices. This approach synthesizes the advantages of real dialogue settings for speech research with a detailed description of the physiological reality of speech production. We describe the facility's method for acquiring synchronized audio streams of two speakers and the system that enables communication among control room technicians, experimenters and participants. Further, we demonstrate the feasibility of the approach by evaluating problems inherent to this specific setup: The first problem is the accuracy of temporal synchronization of the two EMA machines, the second is the severity of electromagnetic interference between the two machines. Our results suggest that the synchronization method used yields an accuracy of approximately 1 ms. Electromagnetic interference was derived from the complex-valued signal amplitudes. This dependent variable was analyzed as a function of the recording status -- i.e. on/off -- of the interfering machine's transmitters. The intermachine distance was varied between 1 m and 8.5 m. Results suggest that a distance of approximately 6.5 m is appropriate to achieve data quality comparable to that of single speaker recordings.},
  volume = {41},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/Geng2013421.pdf},
  pages = {421--431}
}
@inproceedings{steiner_AVSP2013,
  author = {Steiner, Ingmar and Richmond, Korin and Ouni, Slim},
  title = {Speech animation using electromagnetic articulography as motion capture data},
  abstract = {Electromagnetic articulography (EMA) captures the position and orientation of a number of markers, attached to the articulators, during speech. As such, it performs the same function for speech that conventional motion capture does for full-body movements acquired with optical modalities, a long-time staple technique of the animation industry. In this paper, EMA data is processed from a motion-capture perspective and applied to the visualization of an existing multimodal corpus of articulatory data, creating a kinematic 3D model of the tongue and teeth by adapting a conventional motion capture based animation paradigm. This is accomplished using off-the-shelf, open-source software. Such an animated model can then be easily integrated into multimedia applications as a digital asset, allowing the analysis of speech production in an intuitive and accessible manner. The processing of the EMA data, its co-registration with 3D data from vocal tract magnetic resonance imaging (MRI) and dental scans, and the modeling workflow are presented in detail, and several issues discussed.},
  year = {2013},
  address = {Annecy, France},
  keywords = {speech production, articulatory data, electromagnetic articulography, vocal tract, motion capture, visualization},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/steiner_AVSP2013.pdf},
  booktitle = {Proc. 12th International Conference on Auditory-Visual Speech Processing},
  pages = {55--60}
}
@inproceedings{jdriesen:iwslt_german,
  author = {Driesen, Joris and Bell, Peter and Sinclair, Mark and Renals, Steve},
  title = {Description of the {UEDIN} system for {German ASR}},
  booktitle = {Proc IWSLT},
  year = {2013},
  month = {December},
  address = {Heidelberg, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/german_iwslt.pdf},
  abstract = {In this paper we describe the ASR system for German built at the University of Edinburgh (UEDIN) for the 2013 IWSLT evaluation campaign. For ASR, the major challenge to overcome was to find suitable acoustic training data. Due to the lack of expertly transcribed acoustic speech data for German, acoustic model training had to be performed on publicly available data crawled from the internet. For evaluation, the lack of a manual segmentation into utterances was handled in two different ways: by generating an automatic segmentation, and by treating entire input files as a single segment. The latter method proved superior for the current task: we obtained a WER of 28.16% on the dev set and 36.21% on the test set.}
}
@inproceedings{bell13_iwslt,
  author = {Bell, Peter and McInnes, Fergus and Gangireddy, Siva Reddy and Sinclair, Mark and Birch, Alexandra and Renals, Steve},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/bell13_iwslt_system.pdf},
  booktitle = {Proc. International Workshop on Spoken Language Translation},
  title = {The {UEDIN} English {ASR} System for the {IWSLT} 2013 Evaluation},
  abstract = {This paper describes the University of Edinburgh (UEDIN) English ASR system for the IWSLT 2013 Evaluation. Notable features of the system include deep neural network acoustic models in both tandem and hybrid configuration, cross-domain adaptation with multi-level adaptive networks, and the use of a recurrent neural network language model. Improvements to our system since the 2012 evaluation -- which include the use of a significantly improved n-gram language model -- result in a 19\% relative WER reduction on the \tstD set.},
  year = {2013}
}
@inproceedings{zwyssig2013-overlap_SS_MEMS,
  author = {Zwyssig, E. and Faubel, F. and Renals, S. and Lincoln, M.},
  doi = {10.1109/ICASSP.2013.6639033},
  title = {Recognition of overlapping speech using digital {MEMS} microphone arrays},
  booktitle = {Proc IEEE ICASSP},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/zwyssig3344-final.pdf},
  abstract = {This paper presents a new corpus comprising single and overlapping speech recorded using digital MEMS and analogue microphone arrays. In addition to this, the paper presents results from speech separation and recognition experiments on this data. The corpus is a reproduction of the multi-channel Wall Street Journal audio-visual corpus (MC-WSJ-AV), containing recorded speech in both a meeting room and an anechoic chamber using two different microphone types as well as two different array geometries. The speech separation and speech recognition experiments were performed using SRP-PHAT-based speaker localisation, superdirective beamforming and multiple post-processing schemes, such as residual echo suppression and binary masking. Our simple, cMLLR-based recognition system matches the performance of state-of-the-art ASR systems on the single speaker task and outperforms them on overlapping speech. The corpus will be made publicly available via the LDC in spring 2013.}
}
@inproceedings{astrinaki2013b,
  author = {Astrinaki, Maria and Moinet, Alexis and Yamagishi, Junichi and Richmond, Korin and Ling, Zhen-Hua and King, Simon and Dutoit, Thierry},
  title = {Mage - {HMM}-based speech synthesis reactively controlled by the articulators},
  abstract = {In this paper, we present the recent progress in the MAGE project. MAGE is a library for realtime and interactive (reactive) parametric speech synthesis using hidden Markov models (HMMs). Here, it is broadened in order to support not only the standard acoustic features (spectrum and f0) used to model and synthesize speech, but also to combine acoustic and articulatory features, such as tongue, lips and jaw positions. Such an integration enables the user to have a straightforward and meaningful control space to intuitively modify the synthesized phones in real time only by configuring the position of the articulators.},
  year = {2013},
  month = {August},
  address = {Barcelona, Spain},
  keywords = {speech synthesis, reactive, articulators},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ICPHS0724.pdf},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  pages = {243}
}