2015.bib

@comment{{This file has been generated by bib2bib 1.92}}
@comment{{Command line: /home/korin/bibtex2html-1.92-LINUX/bib2bib -oc /home/korin/projects/publications/new_output/transitdata/2015-citations -ob /home/korin/projects/publications/new_output/transitdata/2015.bib -c 'year : "2015"' /home/korin/projects/publications/filtlists/full_publications_list.bib}}
@inproceedings{SwietojanskiICASSP15,
  author = {Swietojanski, P. and Renals, S.},
  title = {Differentiable Pooling for Unsupervised Speaker Adaptation},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/Swietojanski_ICASSP2015.pdf},
  abstract = {This paper proposes a differentiable pooling mechanism to perform model-based neural network speaker adaptation. The proposed technique learns a speaker-dependent combination of activations within pools of hidden units; it works well in unsupervised adaptation and does not require speaker-adaptive training. We have conducted a set of experiments on the TED talks data, as used in the IWSLT evaluations. Our results indicate that the approach can reduce word error rates (WERs) on standard IWSLT test sets by about 5–11% relative compared to speaker-independent systems, and that it is complementary to the recently proposed learning hidden units contribution (LHUC) approach, reducing WER by 6–13% relative. Both methods were also found to work well when adapting with small amounts of unsupervised data: as little as 10 seconds is enough to decrease the WER by 5% relative compared to the baseline speaker-independent system.},
  categories = {Differentiable pooling, Speaker Adaptation, Deep Neural Networks, TED, LHUC}
}
@inproceedings{Swietojanski2015,
  author = {Swietojanski, P. and Bell, P. and Renals, S.},
  title = {Structured Output Layer with Auxiliary Targets for Context-Dependent Acoustic Modelling},
  booktitle = {Proc. Interspeech},
  year = {2015},
  abstract = {In previous work we have introduced a multi-task training technique for neural network acoustic modelling, in which context-dependent and context-independent targets are jointly learned. In this paper, we extend the approach by structuring the output layer such that the context-dependent outputs are dependent on the context-independent outputs, thus using the context-independent predictions at run-time. We have also investigated the applicability of this idea to unsupervised speaker adaptation, as an approach to overcome the data sparsity issues that come to the fore when estimating systems with a large number of context-dependent states from limited data. We have experimented with various amounts of training material (from 10 to 300 hours) and find that the proposed techniques are particularly well suited to data-constrained conditions, allowing larger context-dependent state-clustered trees to be better utilised. Experimental results are reported for large vocabulary speech recognition using the Switchboard and TED corpora.},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/Swietojanski_Interspeech2015.pdf},
  categories = {multitask learning, structured output layer, adaptation, deep neural networks}
}
@inproceedings{dnnbmtl_ICASSP15,
  author = {Wu, Z. and Valentini-Botinhao, C. and Watts, O. and King, S.},
  title = {{Deep neural networks employing multi-task learning and stacked bottleneck features for speech synthesis.}},
  booktitle = {Proc. ICASSP},
  year = {2015},
  abstract = {Deep neural networks (DNNs) use a cascade of hidden representations to enable the learning of complex mappings from input to output features. They are able to learn the complex mapping from text-based linguistic features to speech acoustic features, and so perform text-to-speech synthesis. Recent results suggest that DNNs can produce more natural synthetic speech than conventional HMM-based statistical parametric systems. In this paper, we show that the hidden representation used within a DNN can be improved through the use of Multi-Task Learning, and that stacking multiple frames of hidden layer activations (stacked bottleneck features) also leads to improvements. Experimental results confirmed the effectiveness of the proposed methods, and in listening tests we find that stacked bottleneck features in particular offer a significant improvement over both a baseline DNN and a benchmark HMM system.},
  month = {April},
  address = {Brisbane, Australia},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/dnnbmtl_ICASSP15.pdf},
  pages = {4460-4464}
}
@inproceedings{rnade_ICASSP15,
  author = {Uria, B. and Murray, I. and Renals, S. and Valentini-Botinhao, C. and Bridle, J.},
  title = {{Modelling acoustic feature dependencies with artificial neural networks: Trajectory-RNADE.}},
  booktitle = {Proc. ICASSP},
  year = {2015},
  abstract = {Given a transcription, sampling from a good model of acoustic feature trajectories should result in plausible realizations of an utterance. However, samples from current probabilistic speech synthesis systems result in low quality synthetic speech. Henter et al. have demonstrated the need to capture the dependencies between acoustic features conditioned on the phonetic labels in order to obtain high quality synthetic speech. These dependencies are often ignored in neural network based acoustic models. We tackle this deficiency by introducing a probabilistic neural network model of acoustic trajectories, trajectory RNADE, able to capture these dependencies.},
  month = {April},
  address = {Brisbane, Australia},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/Uria2015.pdf},
  pages = {4465-4469}
}
@inproceedings{dnncost_IS15,
  author = {Valentini-Botinhao, C. and Wu, Z. and King, S.},
  title = {{Towards minimum perceptual error training for {DNN}-based speech synthesis}},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/dnncost_IS15.pdf},
  abstract = {We propose to use a perceptually-oriented domain to improve the quality of text-to-speech generated by deep neural networks (DNNs). We train a DNN that predicts the parameters required for speech reconstruction but whose cost function is calculated in another domain. In this paper, to represent this perceptual domain we extract an approximated version of the Spectro-Temporal Excitation Pattern that was originally proposed as part of a model of hearing speech in noise. We train DNNs that predict band aperiodicity, fundamental frequency and Mel cepstral coefficients and compare generated speech when the spectral cost function is defined in the Mel cepstral, warped log spectrum or perceptual domains. Objective results indicate that the perceptual domain system achieves the highest quality.}
}
@inproceedings{salb_IS15,
  author = {Pucher, M. and Toman, M. and Schabus, D. and Valentini-Botinhao, C. and Yamagishi, J. and Zillinger, B. and Schmid, E.},
  title = {{Influence of speaker familiarity on blind and visually impaired children's perception of synthetic voices in audio games}},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/salb_IS15.pdf},
  abstract = {In this paper we evaluate how speaker familiarity influences the engagement times and performance of blind school children when playing audio games made with different synthetic voices. We developed synthetic voices of school children, their teachers and of speakers that were unfamiliar to them and used each of these voices to create variants of two audio games: a memory game and a labyrinth game. Results show that pupils had significantly longer engagement times and better performance when playing games that used synthetic voices built with their own voices. This result was observed even though the children reported not recognising the synthetic voice as their own after the experiment was over. These findings could be used to improve the design of audio games and lecture books for blind and visually impaired children.}
}
@article{Cassia_SPCOM15,
  author = {Valentini-Botinhao, Cassia and Toman, Markus and Pucher, Michael and Schabus, Dietmar and Yamagishi, Junichi},
  doi = {10.1016/j.specom.2015.09.002},
  title = {Intelligibility of time-compressed synthetic speech: Compression method and speaking style.},
  journal = {Speech Communication},
  month = {October},
  year = {2015},
  abstract = {We present a series of intelligibility experiments performed on natural and synthetic speech time-compressed at a range of rates and analyze the effect of speech corpus and compression method on the intelligibility scores of sighted and blind individuals. In particular, we are interested in comparing linear and non-linear compression methods applied to normal and fast speech of different speakers. We recorded English and German language voice talents reading prompts at a normal and a fast rate. To create synthetic voices we trained a statistical parametric speech synthesis system based on the normal and the fast data of each speaker. We compared three compression methods: scaling the variance of the state duration model, interpolating the duration models of the fast and the normal voices, and applying a linear compression method to the generated speech waveform. Word recognition results for the English voices show that generating speech at a normal speaking rate and then applying linear compression resulted in the most intelligible speech at all tested rates. A similar result was found when evaluating the intelligibility of the natural speech corpus. For the German voices, interpolation was found to be better at moderate speaking rates but the linear method was again more successful at very high rates, particularly when applied to the fast data. Phonemic level annotation of the normal and fast databases showed that the German speaker was able to reproduce speech at a fast rate with fewer deletion and substitution errors compared to the English speaker, supporting the intelligibility benefits observed when compressing his fast speech. This shows that the use of fast speech data to create faster synthetic voices does not necessarily lead to more intelligible voices as results are highly dependent on how successful the speaker was at speaking fast while maintaining intelligibility. Linear compression applied to normal rate speech can more reliably provide higher intelligibility, particularly at ultra fast rates.}
}
@article{7169536,
  author = {Chen, Ling-Hui and Raitio, T. and Valentini-Botinhao, C. and Ling, Z. and Yamagishi, J.},
  doi = {10.1109/TASLP.2015.2461448},
  title = {A Deep Generative Architecture for Postfiltering in Statistical Parametric Speech Synthesis},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  issn = {2329-9290},
  number = {11},
  pages = {2003-2014},
  volume = {23},
  year = {2015},
  keywords = {HMM;deep generative architecture;modulation spectrum;postfilter;segmental quality;speech synthesis},
  abstract = {The generated speech of hidden Markov model (HMM)-based statistical parametric speech synthesis still sounds muffled. One cause of this degradation in speech quality may be the loss of fine spectral structures. In this paper, we propose to use a deep generative architecture, a deep neural network (DNN) generatively trained, as a postfilter. The network models the conditional probability of the spectrum of natural speech given that of synthetic speech to compensate for this gap between synthetic and natural speech. The proposed probabilistic postfilter is generatively trained by cascading two restricted Boltzmann machines (RBMs) or deep belief networks (DBNs) with one bidirectional associative memory (BAM). We devised two types of DNN postfilters: one operating in the mel-cepstral domain and the other in the higher dimensional spectral domain. We compare these two new data-driven postfilters with other types of postfilters that are currently used in speech synthesis: a fixed mel-cepstral based postfilter, the global variance based parameter generation, and the modulation spectrum-based enhancement. Subjective evaluations using the synthetic voices of a male and a female speaker confirmed that the proposed DNN-based postfilter in the spectral domain significantly improved the segmental quality of synthetic speech compared to that obtained with conventional methods.}
}
@inproceedings{Merritt2015Attributing,
  author = {Merritt, Thomas and Latorre, Javier and King, Simon},
  title = {{Attributing modelling errors in HMM synthesis by stepping gradually from natural to modelled speech}},
  booktitle = {{Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}},
  address = {Brisbane},
  month = {April},
  pages = {4220--4224},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/merritt2015AttributingErrors.pdf},
  abstract = {{Even the best statistical parametric speech synthesis systems do not achieve the naturalness of good unit selection. We investigated possible causes of this. By constructing speech signals that lie in between natural speech and the output from a complete HMM synthesis system, we investigated various effects of modelling. We manipulated the temporal smoothness and the variance of the spectral parameters to create stimuli, then presented these to listeners alongside natural and vocoded speech, as well as output from a full HMM-based text-to-speech system and from an idealised `pseudo-HMM'. All speech signals, except the natural waveform, were created using vocoders employing one of two popular spectral parameterisations: Mel-Cepstra or Mel-Line Spectral Pairs. Listeners made `same or different' pairwise judgements, from which we generated a perceptual map using Multidimensional Scaling. We draw conclusions about which aspects of HMM synthesis are limiting the naturalness of the synthetic speech.}},
  categories = {{speech synthesis, hidden Markov modelling, vocoding}}
}
@inproceedings{Merritt2015RichContext,
  author = {Merritt, Thomas and Yamagishi, Junichi and Wu, Zhizheng and Watts, Oliver and King, Simon},
  title = {{Deep neural network context embeddings for model selection in rich-context HMM synthesis}},
  booktitle = {{Proc. Interspeech}},
  address = {Dresden},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/merritt2015RichContext.pdf},
  abstract = {{This paper introduces a novel form of parametric synthesis that uses context embeddings produced by the bottleneck layer of a deep neural network to guide the selection of models in a rich-context HMM-based synthesiser. Rich-context synthesis – in which Gaussian distributions estimated from single linguistic contexts seen in the training data are used for synthesis, rather than more conventional decision tree-tied models – was originally proposed to address over-smoothing due to averaging across contexts. Our previous investigations have confirmed experimentally that averaging across different contexts is indeed one of the largest factors contributing to the limited quality of statistical parametric speech synthesis. However, a possible weakness of the rich context approach as previously formulated is that a conventional tied model is still used to guide selection of Gaussians at synthesis time. Our proposed approach replaces this with context embeddings derived from a neural network.}},
  categories = {{speech synthesis, hidden Markov model, deep neural networks, rich context, embedding}}
}
@inproceedings{kamper+etal_icassp15,
  author = {Kamper, H. and Elsner, M. and Jansen, A. and Goldwater, S. J.},
  title = {Unsupervised neural network based feature extraction using weak top-down constraints},
  booktitle = {Proc. ICASSP},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/kamper+elsner+jansen+goldwater_icassp2015.pdf},
  abstract = {Deep neural networks (DNNs) have become a standard component in supervised ASR, used in both data-driven feature extraction and acoustic modelling. Supervision is typically obtained from a forced alignment that provides phone class targets, requiring transcriptions and pronunciations. We propose a novel unsupervised DNN-based feature extractor that can be trained without these resources in zero-resource settings. Using unsupervised term discovery, we find pairs of isolated word examples of the same unknown type; these provide weak top-down supervision. For each pair, dynamic programming is used to align the feature frames of the two words. Matching frames are presented as input-output pairs to a deep autoencoder (AE) neural network. Using this AE as feature extractor in a word discrimination task, we achieve 64% relative improvement over a previous state-of-the-art system, 57% improvement relative to a bottom-up trained deep AE, and come to within 23% of a supervised system.},
  categories = {unsupervised feature extraction, deep neural networks, zero-resource speech processing, top-down constraints}
}
@inproceedings{kamper+etal_interspeech15,
  author = {Kamper, Herman and Goldwater, S. J. and Jansen, Aren},
  title = {Fully unsupervised small-vocabulary speech recognition using a segmental {B}ayesian model},
  booktitle = {Proc. Interspeech},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/interspeech15-unsupTIDigits.pdf},
  abstract = {Current supervised speech technology relies heavily on transcribed speech and pronunciation dictionaries. In settings where unlabelled speech data alone is available, unsupervised methods are required to discover categorical linguistic structure directly from the audio. We present a novel Bayesian model which segments unlabelled input speech into word-like units, resulting in a complete unsupervised transcription of the speech in terms of discovered word types. In our approach, a potential word segment (of arbitrary length) is embedded in a fixed-dimensional space; the model (implemented as a Gibbs sampler) then builds a whole-word acoustic model in this space while jointly doing segmentation. We report word error rates in a connected digit recognition task by mapping the unsupervised output to ground truth transcriptions. Our model outperforms a previously developed HMM-based system, even when the model is not constrained to discover only the 11 word types present in the data.},
  categories = {unsupervised speech processing, word discovery, speech segmentation, unsupervised learning, segmental models}
}
@inproceedings{ribeiro2015multilevel,
  author = {Ribeiro, Manuel Sam and Clark, Robert A. J.},
  title = {A Multi-Level Representation of f0 using the Continuous Wavelet Transform and the Discrete Cosine Transform},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP},
  year = {2015},
  month = {April},
  address = {Brisbane, Australia},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ribeiro_and_clark_icassp15.pdf},
  abstract = {We propose a representation of f0 using the Continuous Wavelet Transform (CWT) and the Discrete Cosine Transform (DCT). The CWT decomposes the signal into various scales of selected frequencies, while the DCT compactly represents complex contours as a weighted sum of cosine functions. The proposed approach has the advantage of combining signal decomposition and higher-level representations, thus modeling low frequencies at higher levels and high frequencies at lower levels. Objective results indicate that this representation improves f0 prediction over traditional short-term approaches. Subjective results show that improvements are seen over the typical MSD-HMM and are comparable to the recently proposed CWT-HMM, while using fewer parameters. These results are discussed and future lines of research are proposed.},
  categories = {prosody, HMM-based synthesis, f0 modeling, continuous wavelet transform, discrete cosine transform}
}
@inproceedings{ribeiro2015perceptual,
  author = {Ribeiro, Manuel Sam and Yamagishi, Junichi and Clark, Robert A. J.},
  title = {A Perceptual Investigation of Wavelet-based Decomposition of f0 for Text-to-Speech Synthesis},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ribeiro_et_al_IS15.pdf},
  abstract = {The Continuous Wavelet Transform (CWT) has been recently proposed to model f0 in the context of speech synthesis. It was shown that systems using signal decomposition with the CWT tend to outperform systems that model the signal directly. The f0 signal is typically decomposed into various scales of differing frequency. In these experiments, we reconstruct f0 with selected frequencies and ask native listeners to judge the naturalness of synthesized utterances with respect to natural speech. Results indicate that HMM-generated f0 is comparable to the CWT low frequencies, suggesting it mostly generates utterances with neutral intonation. Middle frequencies achieve very high levels of naturalness, while very high frequencies are mostly noise.},
  categories = {speech synthesis, prosody, f0 modeling, continuous wavelet transform, perceptual experiments}
}
@inproceedings{bell15_cd_multitask,
  author = {Bell, P. and Renals, S.},
  title = {Regularization of context-dependent deep neural networks with context-independent multi-task training},
  booktitle = {Proc. ICASSP},
  address = {Brisbane, Australia},
  month = {April},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bell15_cd_multitask.pdf},
  abstract = {The use of context-dependent targets has become standard in hybrid DNN systems for automatic speech recognition. However, we argue that despite the use of state-tying, optimising to context-dependent targets can lead to over-fitting, and that discriminating between arbitrary tied context-dependent targets may not be optimal. We propose a multitask learning method where the network jointly predicts context-dependent and monophone targets. We evaluate the method on a large-vocabulary lecture recognition task and show that it yields relative improvements of 3-10\% over baseline systems.}
}
@inproceedings{bell15_complementary_task_mt,
  author = {Bell, Peter and Renals, Steve},
  title = {Complementary tasks for context-dependent deep neural network acoustic models},
  booktitle = {Proc. Interspeech},
  address = {Dresden, Germany},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bell15_complementary_task_mt.pdf},
  abstract = {We have previously found that context-dependent DNN models for automatic speech recognition can be improved with the use of monophone targets as a secondary task for the network. This paper asks whether the improvements derive from the regularising effect of having a much smaller number of monophone outputs -- compared to the typical number of tied states -- or from the use of targets that are not tied to an arbitrary state-clustering. We investigate the use of factorised targets for left and right context, and targets motivated by articulatory properties of the phonemes. We present results on a large-vocabulary lecture recognition task. Although the regularising effect of monophones seems to be important, all schemes give substantial improvements over the baseline single task system, even though the cardinality of the outputs is relatively high.}
}
@inproceedings{bell15_news_summarisation,
  author = {Bell, Peter and Lai, Catherine and Llewellyn, Clare and Birch, Alexandra and Sinclair, Mark},
  title = {A system for automatic broadcast news summarisation, geolocation and translation},
  booktitle = {Proc. Interspeech (demo session)},
  address = {Dresden, Germany},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bell15_news_summarisation.pdf},
  abstract = {An increasing amount of news content is produced in audio-video form every day. To effectively analyse and monitor this multilingual data stream, we require methods to extract and present audio content in accessible ways. In this paper, we describe an end-to-end system for processing and browsing audio news data. This fully automated system brings together our recent research on audio scene analysis, speech recognition, summarisation, named entity detection, geolocation, and machine translation. The graphical interface allows users to visualise the distribution of news content by entity names and story location. Browsing of news events is facilitated through extractive summaries and the ability to view transcripts in multiple languages.}
}
@inproceedings{cervone15_reported_speech_prosody,
  author = {Cervone, Alessandra and Lai, Catherine and Pareti, Silvia and Bell, Peter},
  title = {Towards automatic detection of reported speech in dialogue using prosodic cues},
  booktitle = {Proc. Interspeech},
  address = {Dresden, Germany},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/cervone15_reported_speech_prosody.pdf},
  abstract = {The phenomenon of reported speech -- whereby we quote the words, thoughts and opinions of others, or recount past dialogue -- is widespread in conversational speech. Detecting such quotations automatically has numerous applications: for example, in enhancing automatic transcription or spoken language understanding applications. However, the task is challenging, not least because lexical cues of quotations are frequently ambiguous or not present in spoken language. The aim of this paper is to identify potential prosodic cues of reported speech which could be used, along with the lexical ones, to automatically detect quotations and ascribe them to their rightful source, that is, reconstructing their Attribution Relations. In order to do so we analyze SARC, a small corpus of telephone conversations that we have annotated with Attribution Relations. The results of the statistical analysis performed on the data show how variations in pitch, intensity, and timing features can be exploited as cues of quotations. Furthermore, we build an SVM classifier which integrates lexical and prosodic cues to automatically detect quotations in speech, and which performs significantly better than chance.}
}
@inproceedings{wester:icphs:2015,
  author = {Wester, Mirjam and Garcia Lecumberri, M. Luisa and Cooke, Martin},
  title = {/u/-fronting in {English} speakers' {L1} but not in their {L2}},
  booktitle = {Proc. ICPhS},
  year = {2015},
  month = {August},
  address = {Glasgow},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:icphs:2015.pdf},
  abstract = {This paper presents an acoustic analysis of the three corner vowels in the Diapix Foreign Language corpus (DIAPIX-FL) which contains material from English and Spanish native speakers from both their L1 and L2. We investigated how L1 vowel characteristics influence the production of L2 vowels, and to what extent a current sound change in one of the languages is reflected in the other. We find that /u/-fronting in English occurs for both native and non-native speakers, although the degree of /u/-fronting is much larger for the English group. English speakers appear to create a separate category for the L2 /u/ rather than use their L1 sound. Spanish speakers show some adjustment to their English /u/ and /a/ realisations. These findings suggest that despite limited exposure to the L2 sounds, learners are aware of realisational differences between the languages and implement them to different degrees even for non-standard variants.},
  categories = {/u/-fronting, L1, L2, non-native}
}
@inproceedings{wester:listeners:IS2015,
  author = {Wester, Mirjam and Valentini-Botinhao, Cassia and Henter, Gustav Eje},
  title = {Are we using enough listeners? {No! An empirically-supported critique of Interspeech 2014 TTS evaluations}},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  pages = {3476--3480},
  address = {Dresden},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:listeners:IS2015.pdf},
  abstract = {Tallying the numbers of listeners that took part in subjective evaluations of synthetic speech at Interspeech 2014 showed that in more than 60% of papers conclusions are based on listening tests with fewer than 20 listeners. Our analysis of Blizzard 2013 data shows that for a MOS test measuring naturalness a stable level of significance is only reached when more than 30 listeners are used. In this paper, we set out a list of guidelines, i.e., a checklist for carrying out meaningful subjective evaluations. We further illustrate the importance of sentence coverage and number of listeners by presenting changes to rank order and number of significant pairs by re-analysing data from the Blizzard Challenge 2013.},
  categories = {Subjective evaluation, text-to-speech, MOS test}
}
@inproceedings{wester:artificial:IS2015,
  author = {Wester, Mirjam and Aylett, Matthew and Tomalin, Marcus and Dall, Rasmus},
  title = {Artificial Personality and Disfluency},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:artificial:IS2015.pdf},
  abstract = {The focus of this paper is artificial voices with different personalities. Previous studies have shown links between an individual's use of disfluencies in their speech and their perceived personality. Here, filled pauses (uh and um) and discourse markers (like, you know, I mean) have been included in synthetic speech as a way of creating an artificial voice with different personalities. We discuss the automatic insertion of filled pauses and discourse markers (i.e., fillers) into otherwise fluent texts. The automatic system is compared to a ground truth of human "acted" filler insertion. Perceived personality (as defined by the big five personality dimensions) of the synthetic speech is assessed by means of a standardised questionnaire. Synthesis without fillers is compared to synthesis with either spontaneous or synthetic fillers. Our findings explore how the inclusion of disfluencies influences the way in which subjects rate the perceived personality of an artificial voice.},
  categories = {artificial personality, TTS, disfluency}
}
@inproceedings{tomalin:diss:2015,
  author = {Tomalin, Marcus and Wester, Mirjam and Dall, Rasmus and Byrne, Bill and King, Simon},
  title = {A Lattice-based Approach to Automatic Filled Pause Insertion},
  booktitle = {Proc. DiSS 2015},
  year = {2015},
  month = {August},
  address = {Edinburgh},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/tomalin:diss:2015.pdf},
  abstract = {This paper describes a novel method for automatically inserting filled pauses (e.g., UM) into fluent texts. Although filled pauses are known to serve a wide range of psychological and structural functions in conversational speech, they have not traditionally been modelled overtly by state-of-the-art speech synthesis systems. However, several recent systems have started to model disfluencies specifically, and so there is an increasing need to create disfluent speech synthesis input by automatically inserting filled pauses into otherwise fluent text. The approach presented here interpolates Ngrams and Full-Output Recurrent Neural Network Language Models (f-RNNLMs) in a lattice-rescoring framework. It is shown that the interpolated system outperforms separate Ngram and f-RNNLM systems, where performance is analysed using the Precision, Recall, and F-score metrics.},
  categories = {Disfluency, Filled Pauses, f-RNNLMs, Ngrams, Lattices}
}
@inproceedings{Wester:diss:2015,
  author = {Wester, Mirjam and Corley, Martin and Dall, Rasmus},
  title = {The Temporal Delay Hypothesis: Natural, Vocoded and Synthetic Speech},
  booktitle = {Proc. DiSS 2015},
  year = {2015},
  month = {August},
  address = {Edinburgh},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:diss:2015.pdf},
  abstract = {Including disfluencies in synthetic speech is being explored as a way of making synthetic speech sound more natural and conversational. How to measure whether the resulting speech is actually more natural, however, is not straightforward. Conventional approaches to synthetic speech evaluation fall short, as listeners are either primed to prefer stimuli with filled pauses or, when they are not primed, prefer more fluent speech. Psycholinguistic reaction time experiments may circumvent this issue. In this paper, we revisit one such reaction time experiment. For natural speech, delays in word onset were found to facilitate word recognition regardless of the type of delay, be it a filled pause (um), silence or a tone. We expand these experiments by examining the effect of using vocoded and synthetic speech. Our results partially replicate previous findings. For natural and vocoded speech, if the delay is a silent pause, significant increases in the speed of word recognition are found. If the delay comprises a filled pause there is a significant increase in reaction time for vocoded speech but not for natural speech. For synthetic speech, no clear effects of delay on word recognition are found. We hypothesise this is because it takes longer (requires more cognitive resources) to process synthetic speech than natural or vocoded speech.},
  categories = {delay hypothesis, disfluency}
}
@inproceedings{dall:diss2015,
  author = {Dall, Rasmus and Wester, Mirjam and Corley, Martin},
  title = {Disfluencies in change detection in natural, vocoded and synthetic speech},
  booktitle = {Proc. DiSS 2015},
  year = {2015},
  month = {August},
  address = {Edinburgh},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/dall:diss:2015.pdf},
  abstract = {In this paper, we investigate the effect of filled pauses, a discourse marker and silent pauses in a change detection experiment in natural, vocoded and synthetic speech. In natural speech, change detection has been found to increase in the presence of filled pauses; we extend this work by replicating earlier findings and exploring the effect of a discourse marker, like, and silent pauses. Furthermore, we report how the use of "unnatural" speech, namely synthetic and vocoded, affects change detection rates. It was found that the filled pauses, the discourse marker and silent pauses all increase change detection rates in natural speech; however, in neither synthetic nor vocoded speech did this effect appear. Rather, change detection rates decreased in both types of "unnatural" speech compared to natural speech. The natural-speech results suggest that while each type of pause increases detection rates, the type of pause may have a further effect. The "unnatural" results suggest that it is not the full pipeline of synthetic speech that causes the degradation, but rather that something in the pre-processing, i.e. vocoding, of the speech database limits the resulting synthesis.},
  categories = {change detection, filled pauses, speech synthesis}
}
@inproceedings{wester:human:IS2015,
  author = {Wester, Mirjam and Wu, Zhizheng and Yamagishi, Junichi},
  title = {Human vs Machine Spoofing Detection on Wideband and Narrowband Data},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:human:IS2015.pdf},
  abstract = {How well do humans detect spoofing attacks directed at automatic speaker verification systems? This paper investigates the performance of humans at detecting spoofing attacks from speech synthesis and voice conversion systems. Two speaker verification tasks, in which the speakers were either humans or machines, were also conducted. The three tasks were carried out with two types of data: wideband (16kHz) and narrowband (8kHz) telephone line simulated data. Spoofing detection by humans was compared to automatic spoofing detection (ASD) algorithms. Listening tests were carefully constructed to ensure the human and automatic tasks were as similar as possible, taking into consideration listeners' constraints (e.g., fatigue and memory limitations). Results for human trials show that the error rates on narrowband data double compared to those on wideband data. The second verification task, which included only artificial speech, showed equal overall acceptance rates for both 8kHz and 16kHz. In the spoofing detection task, there was a drop in performance on most of the artificial trials as well as on human trials. At 8kHz, 20% of human trials were incorrectly classified as artificial, compared to 12% at 16kHz. The ASD algorithms also showed a drop in performance on 8kHz data, but outperformed human listeners across the board.},
  categories = {spoofing, human performance, automatic spoofing detection}
}
@article{sizov2015joint,
  author = {Sizov, Aleksandr and Khoury, Elie and Kinnunen, Tomi and Wu, Zhizheng and Marcel, Sebastien},
  publisher = {IEEE},
  title = {Joint Speaker Verification and Antispoofing in the i-Vector Space},
  journal = {IEEE Transactions on Information Forensics and Security},
  number = {4},
  volume = {10},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/TIFS2015_joint.pdf},
  pages = {821--832}
}
@inproceedings{wu2015minimum,
  author = {Wu, Zhizheng and King, Simon},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/is2015_trajectory_dnn.pdf},
  booktitle = {Proc. Interspeech},
  year = {2015},
  title = {Minimum trajectory error training for deep neural networks, combined with stacked bottleneck features}
}
@inproceedings{wu2015adaptation,
  author = {Wu, Zhizheng and Swietojanski, Pawel and Veaux, Christophe and Renals, Steve and King, Simon},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/is2015_dnn_adaptation.pdf},
  booktitle = {Proc. Interspeech},
  year = {2015},
  title = {A study of speaker adaptation for {DNN}-based speech synthesis}
}
@inproceedings{wu2015asvspoof,
  author = {Wu, Zhizheng and Kinnunen, Tomi and Evans, Nicholas and Yamagishi, Junichi and Hanilci, Cemal and Sahidullah, Md and Sizov, Aleksandr},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/is2015_asvspoof.pdf},
  booktitle = {Proc. Interspeech},
  year = {2015},
  title = {{ASVspoof} 2015: the First Automatic Speaker Verification Spoofing and Countermeasures Challenge}
}
@inproceedings{tian2015fusion,
  author = {Tian, Xiaohai and Wu, Zhizheng and Lee, Siu-Wa and Nguyen, Quy Hy and Dong, Minghui and Chng, Eng Siong},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/is2015_xiaohai_fusion.pdf},
  booktitle = {Proc. Interspeech},
  year = {2015},
  title = {System Fusion for High-Performance Voice Conversion}
}
@inproceedings{wu2015mtl,
  author = {Wu, Zhizheng and Valentini-Botinhao, Cassia and Watts, Oliver and King, Simon},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/icassp2015_dnn_tts.pdf},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2015},
  title = {Deep neural network employing multi-task learning and stacked bottleneck features for speech synthesis}
}
@inproceedings{wu2015sas,
  author = {Wu, Zhizheng and Khodabakhsh, Ali and Demiroglu, Cenk and Yamagishi, Junichi and Saito, Daisuke and Toda, Tomoki and King, Simon},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/icassp2015_sas.pdf},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2015},
  title = {{SAS}: A speaker verification spoofing database containing diverse attacks}
}
@inproceedings{tian2015sparse,
  author = {Tian, Xiaohai and Wu, Zhizheng and Lee, Siu-Wa and Nguyen, Quy Hy and Chng, Eng Siong and Dong, Minghui},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/icassp2015_sparse_warping.pdf},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  year = {2015},
  title = {Sparse representation for frequency warping based voice conversion}
}
@inproceedings{Hu_ICASSP15,
  author = {Hu, Qiong and Stylianou, Yannis and Maia, Ranniery and Richmond, Korin and Yamagishi, Junichi},
  title = {Methods for applying dynamic sinusoidal models to statistical parametric speech synthesis},
  booktitle = {Proc. ICASSP},
  year = {2015},
  month = {April},
  address = {Brisbane, Australia},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/IC15_Qiong.pdf},
  abstract = {Sinusoidal vocoders can generate high quality speech, but they have not been extensively applied to statistical parametric speech synthesis. This paper presents two ways of using dynamic sinusoidal models for statistical speech synthesis, enabling the sinusoid parameters to be modelled in HMM-based synthesis. In the first method, features extracted from a fixed- and low-dimensional, perception-based dynamic sinusoidal model (PDM) are statistically modelled directly. In the second method, we convert both static amplitude and dynamic slope from all the harmonics of a signal, which we term the Harmonic Dynamic Model (HDM), to intermediate parameters (regularised cepstral coefficients) for modelling. During synthesis, HDM is then used to reconstruct speech. We have compared the voice quality of these two methods to the STRAIGHT cepstrum-based vocoder with mixed excitation in formal listening tests. Our results show that HDM with intermediate parameters can generate quality comparable to STRAIGHT, while PDM direct modelling seems promising in terms of producing good speech quality without resorting to intermediate parameters such as cepstra.}
}
@inproceedings{Hu_Interspeech15,
  author = {Hu, Qiong and Wu, Zhizheng and Richmond, Korin and Yamagishi, Junichi and Stylianou, Yannis and Maia, Ranniery},
  title = {Fusion of multiple parameterisations for {DNN}-based sinusoidal speech synthesis with multi-task learning},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/Q_Interspeech15.pdf},
  abstract = {It has recently been shown that deep neural networks (DNN) can improve the quality of statistical parametric speech synthesis (SPSS) when using a source-filter vocoder. Our own previous work has furthermore shown that a dynamic sinusoidal model (DSM) is also highly suited to DNN-based SPSS, whereby sinusoids may either be used themselves as a “direct parameterisation” (DIR), or they may be encoded using an “intermediate spectral parameterisation” (INT). The approach in that work was effectively to replace a decision tree with a neural network. However, waveform parameterisation and synthesis steps that have been developed to suit HMMs may not fully exploit DNN capabilities. Here, in contrast, we investigate ways to combine INT and DIR at the levels of both DNN modelling and waveform generation. For DNN training, we propose to use multi-task learning to model cepstra (from INT) and log amplitudes (from DIR) as primary and secondary tasks. Our results show combining these improves modelling accuracy for both tasks. Next, during synthesis, instead of discarding parameters from the second task, a fusion method using harmonic amplitudes derived from both tasks is applied. Preference tests show the proposed method gives improved performance, and that this applies to synthesising both with and without global variance parameters.}
}
@inproceedings{llu_is2015b,
  author = {Lu, Liang and Zhang, Xingxing and Cho, KyungHyun and Renals, Steve},
  date-modified = {2015-08-19 10:22:47 +0100},
  title = {A Study of the Recurrent Neural Network Encoder-Decoder for Large Vocabulary Speech Recognition},
  abstract = {Deep neural networks have advanced the state-of-the-art in automatic speech recognition, when combined with hidden Markov models (HMMs). Recently there has been interest in using systems based on recurrent neural networks (RNNs) to perform sequence modelling directly, without the requirement of an HMM superstructure. In this paper, we study the RNN encoder-decoder approach for large vocabulary end-to-end speech recognition, whereby an encoder transforms a sequence of acoustic vectors into a sequence of feature representations, from which a decoder recovers a sequence of words. We investigated this approach on the Switchboard corpus using a training set of around 300 hours of transcribed audio data. Without the use of an explicit language model or pronunciation lexicon, we achieved promising recognition accuracy, demonstrating that this approach warrants further investigation.},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/liang_is15a.pdf},
  booktitle = {Proc. Interspeech},
  categories = {end-to-end speech recognition, deep neural networks, recurrent neural networks, encoder-decoder},
  date-added = {2015-08-19 10:14:21 +0100}
}
@inproceedings{llu_is2015a,
  author = {Lu, Liang and Renals, Steve},
  date-modified = {2015-08-19 10:13:33 +0100},
  title = {Feature-space Speaker Adaptation for Probabilistic Linear Discriminant Analysis Acoustic Models},
  abstract = {Probabilistic linear discriminant analysis (PLDA) acoustic models extend Gaussian mixture models by factorizing the acoustic variability using state-dependent and observation-dependent variables. This enables the use of higher dimensional acoustic features, and the capture of intra-frame feature correlations. In this paper, we investigate the estimation of speaker adaptive feature-space (constrained) maximum likelihood linear regression transforms from PLDA-based acoustic models. This feature-space speaker transformation estimation approach is potentially very useful due to the ability of PLDA acoustic models to use different types of acoustic features, for example applying these transforms to deep neural network (DNN) acoustic models for cross adaptation. We evaluated the approach on the Switchboard corpus, and observe significant word error reduction by using both the mel-frequency cepstral coefficients and DNN bottleneck features.},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/liang_is15b.pdf},
  booktitle = {Proc. Interspeech},
  categories = {speech recognition, probabilistic linear discriminant analysis, speaker adaptation, fMLLR, PLDA},
  date-added = {2015-08-19 10:11:53 +0100}
}
@inproceedings{llu_icassp15,
  author = {Lu, Liang and Renals, Steve},
  date-modified = {2015-08-19 10:16:24 +0100},
  title = {Multi-frame factorisation for long-span acoustic modelling},
  abstract = {Acoustic models based on Gaussian mixture models (GMMs) typically use short span acoustic feature inputs. This does not capture long-term temporal information from speech owing to the conditional independence assumption of hidden Markov models. In this paper, we present an implicit approach that approximates the joint distribution of long span features by product of factorized models, in contrast to deep neural networks (DNNs) that model feature correlations directly. The approach is applicable to a broad range of acoustic models. We present experiments using GMM and probabilistic linear discriminant analysis (PLDA) based models on Switchboard, observing consistent word error rate reductions.},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/llu_icassp15.pdf},
  booktitle = {Proc. ICASSP},
  categories = {Acoustic modelling, long span features, multi-frame factorisation},
  date-added = {2015-08-19 10:06:12 +0100}
}
@inproceedings{tian_recognizing_2015,
  author = {Tian, Leimin and Lai, Catherine and Moore, Johanna D.},
  title = {Recognizing emotions in dialogue with disfluences and non-verbal vocalisations},
  booktitle = {Proceedings of the 4th {Interdisciplinary} {Workshop} on {Laughter} and {Other} {Non}-verbal {Vocalisations} in {Speech}},
  abstract = {We investigate the usefulness of DISfluencies and Non-verbal Vocalisations (DIS-NV) for recognizing human emotions in dialogues. The proposed features measure filled pauses, fillers, stutters, laughter, and breath in utterances. The predictiveness of DIS-NV features is compared with lexical features and state-of-the-art low-level acoustic features. Our experimental results show that using DIS-NV features alone is not as predictive as using lexical or acoustic features. However, adding them to the lexical or acoustic feature set yields improvements compared to using lexical or acoustic features alone. This indicates that disfluencies and non-verbal vocalisations provide useful information overlooked by the other two types of features for emotion recognition.},
  volume = {14},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/tian_recognizing_emotions_in_dialogues_with_disfluencies_and_non_verbal_vocalisations.pdf},
  pages = {15},
  categories = {emotion recognition, disfluency, LSTM, dialogue}
}
@inproceedings{tian_emotion_2015,
  author = {Tian, Leimin and Moore, Johanna D. and Lai, Catherine},
  title = {Emotion {Recognition} in {Spontaneous} and {Acted} {Dialogues}},
  booktitle = {Proceedings of {ACII} 2015},
  address = {Xi'an, China},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/tian2015emotion.pdf},
  abstract = {In this work, we compare emotion recognition on two types of speech: spontaneous and acted dialogues. Experiments were conducted on the AVEC 2012 database of spontaneous dialogues and the IEMOCAP database of acted dialogues. We studied the performance of two types of acoustic features for emotion recognition: knowledge-inspired disfluency and non-verbal vocalisation (DIS-NV) features, and statistical Low-Level Descriptor (LLD) based features. Both Support Vector Machines (SVM) and Long Short-Term Memory Recurrent Neural Networks (LSTM-RNN) were built using each feature set on each emotional database. Our work aims to identify aspects of the data that constrain the effectiveness of models and features. Our results show that the performance of different types of features and models is influenced by the type of dialogue and the amount of training data. Because DIS-NVs are less frequent in acted dialogues than in spontaneous dialogues, the DIS-NV features perform better than the LLD features when recognizing emotions in spontaneous dialogues, but not in acted dialogues. The LSTM-RNN model gives better performance than the SVM model when there is enough training data, but the complex structure of an LSTM-RNN model may limit its performance when there is less training data available, and may also risk over-fitting. Additionally, we find that long distance contexts may be more useful when performing emotion recognition at the word level than at the utterance level.},
  categories = {emotion recognition, disfluency, laughter, speech processing, HCI, dialogue}
}
@article{LorenzoTrueba2015292,
  author = {Lorenzo-Trueba, Jaime and Barra-Chicote, Roberto and San-Segundo, Rubén and Ferreiros, Javier and Yamagishi, Junichi and Montero, Juan M.},
  doi = {10.1016/j.csl.2015.03.008},
  title = {Emotion transplantation through adaptation in HMM-based speech synthesis},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230815000376},
  journal = {Computer Speech & Language},
  issn = {0885-2308},
  number = {1},
  abstract = {This paper proposes an emotion transplantation method capable of modifying a synthetic speech model through the use of CSMAPLR adaptation in order to incorporate emotional information learned from a different speaker model while maintaining the identity of the original speaker as much as possible. The proposed method relies on learning both emotional and speaker identity information by means of their adaptation function from an average voice model, and combining them into a single cascade transform capable of imbuing the desired emotion into the target speaker. This method is then applied to the task of transplanting four emotions (anger, happiness, sadness and surprise) into 3 male speakers and 3 female speakers and evaluated in a number of perceptual tests. The results of the evaluations show how the perceived naturalness for emotional text significantly favors the use of the proposed transplanted emotional speech synthesis when compared to traditional neutral speech synthesis, evidenced by a large increase in the perceived emotional strength of the synthesized utterances at a slight cost in speech quality. A final evaluation with a robotic laboratory assistant application shows how by using emotional speech we can significantly increase the students' satisfaction with the dialog system, proving how the proposed emotion transplantation system provides benefits in real applications.},
  volume = {34},
  year = {2015},
  keywords = {Emotion transplantation},
  pages = {292 - 307}
}
@inproceedings{hewer2015statistical,
  editor = {{The Scottish Consortium for ICPhS 2015}},
  author = {Hewer, Alexander and Steiner, Ingmar and Bolkart, Timo and Wuhrer, Stefanie and Richmond, Korin},
  isbn = {978-0-85261-941-4},
  title = {A statistical shape space model of the palate surface trained on {3D MRI} scans of the vocal tract},
  abstract = {We describe a minimally-supervised method for computing a statistical shape space model of the palate surface. The model is created from a corpus of volumetric magnetic resonance imaging (MRI) scans collected from 12 speakers. We extract a 3D mesh of the palate from each speaker, then train the model using principal component analysis (PCA). The palate model is then tested using 3D MRI from another corpus and evaluated using a high-resolution optical scan. We find that the error is low even when only a handful of measured coordinates are available. In both cases, our approach yields promising results. It can be applied to extract the palate shape from MRI data, and could be useful to other analysis modalities, such as electromagnetic articulography (EMA) and ultrasound tongue imaging (UTI).},
  year = {2015},
  month = {August},
  note = {retrieved from http://www.icphs2015.info/pdfs/Papers/ICPHS0724.pdf},
  address = {Glasgow, United Kingdom},
  keywords = {vocal tract MRI, principal component analysis, palate model},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ICPHS0724.pdf},
  booktitle = {Proceedings of the 18th International Congress of Phonetic Sciences}
}
@incollection{Hewer2015NPSA,
  editor = {Breuß, Michael and Bruckstein, Alfred M. and Maragos, Petros and Wuhrer, Stefanie},
  author = {Hewer, Alexander and Wuhrer, Stefanie and Steiner, Ingmar and Richmond, Korin},
  publisher = {Springer},
  title = {Tongue mesh extraction from {3D} {MRI} data of the human vocal tract},
  series = {Mathematics and Visualization},
  booktitle = {Perspectives in Shape Analysis},
  note = {(in press)},
  year = {2015}
}
@article{richmond2015use,
  author = {Richmond, Korin and Ling, Zhen-Hua and Yamagishi, Junichi},
  doi = {10.1250/ast.36.467},
  title = {The use of articulatory movement data in speech synthesis applications: An overview - Application of articulatory movements using machine learning algorithms [Invited Review]},
  journal = {Acoustical Science and Technology},
  number = {6},
  volume = {36},
  year = {2015},
  pages = {467-477}
}
@article{richmond2015applications,
  author = {Richmond, Korin and Yamagishi, Junichi and Ling, Zhen-Hua},
  title = {Applications of articulatory movements based on machine learning},
  journal = {Journal of the Acoustical Society of Japan},
  number = {10},
  volume = {70},
  year = {2015},
  pages = {539--545}
}
@inproceedings{bell15_alignment,
  author = {Bell, Peter and Renals, Steve},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bell15_alignment.pdf},
  booktitle = {Proc. ASRU},
  title = {A system for automatic alignment of broadcast media captions using weighted finite-state transducers},
  abstract = {We describe our system for alignment of broadcast media captions in the 2015 MGB Challenge. A precise time alignment of previously-generated subtitles to media data is important in the process of caption generation by broadcasters. However, this task is challenging due to the highly diverse, often noisy content of the audio, and because the subtitles are frequently not a verbatim representation of the actual words spoken. Our system employs a two-pass approach with appropriately constrained weighted finite state transducers (WFSTs) to enable good alignment even when the audio quality would be challenging for conventional ASR. The system achieves an f-score of 0.8965 on the MGB Challenge development set.},
  year = {2015}
}
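@comment{{The entry above evaluates caption alignment with an f-score (0.8965 on the MGB development set). The sketch below shows one way such a score can be computed over word-level alignments, assuming a hypothesis word counts as correct when it matches a reference word and both boundaries lie within a small tolerance; this matching rule and the tolerance are assumptions, not the official MGB scoring tool.

def align_fscore(reference, hypothesis, tol=0.1):
    # reference, hypothesis: lists of (word, start_sec, end_sec) tuples.
    matched = 0
    used = set()
    for word, start, end in hypothesis:
        for i, (ref_word, ref_start, ref_end) in enumerate(reference):
            if (i not in used and word == ref_word
                    and abs(start - ref_start) <= tol
                    and abs(end - ref_end) <= tol):
                matched += 1
                used.add(i)
                break
    precision = matched / len(hypothesis) if hypothesis else 0.0
    recall = matched / len(reference) if reference else 0.0
    if precision + recall == 0.0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
}}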
@inproceedings{ali15_multi_wer_asr,
  author = {Ali, Ahmed and Magdy, Walid and Bell, Peter and Renals, Steve},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/asru2015-multi-reference.pdf},
  booktitle = {Proc. ASRU},
  title = {Multi-reference {WER} for evaluating {ASR} for languages with no orthographic rules},
  year = {2015}
}
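@comment{{The entry above concerns a multi-reference WER for languages with no standardised orthography. Below is a minimal sketch of one natural reading of the metric: score the hypothesis against each acceptable reference spelling and keep the lowest WER. The paper's exact definition may differ, and the example transliterations are invented.

def edit_distance(ref, hyp):
    # Word-level Levenshtein distance via dynamic programming.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,        # deletion
                          d[i][j - 1] + 1,        # insertion
                          d[i - 1][j - 1] + cost) # substitution
    return d[len(ref)][len(hyp)]

def multi_reference_wer(references, hypothesis):
    # references: list of tokenised acceptable transcripts; hypothesis: tokens.
    return min(edit_distance(ref, hypothesis) / max(len(ref), 1)
               for ref in references)

# Two acceptable spellings of the same utterance (invented example).
refs = [["ya", "habibi", "kaifa", "halak"], ["ya", "7abibi", "kif", "7alak"]]
print(multi_reference_wer(refs, ["ya", "habibi", "kif", "halak"]))  # 0.25
}}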
@inproceedings{bell15_mgb_challenge,
  author = {Bell, Peter and Gales, Mark and Hain, Thomas and Kilgour, Jonathan and Lanchantin, Pierre and Liu, Xunying and McParland, Andrew and Renals, Steve and Saz, Oscar and Wester, Mirjam and Woodland, Phil},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bell15_mgb_challenge.pdf},
  booktitle = {Proc. ASRU},
  title = {The {MGB} challenge: Evaluating multi-genre broadcast media recognition},
  abstract = {This paper describes the Multi-Genre Broadcast (MGB) Challenge at ASRU~2015, an evaluation focused on speech recognition, speaker diarization, and ``lightly supervised'' alignment of BBC TV recordings. The challenge training data covered the whole range of seven weeks of BBC TV output across four channels, resulting in about 1,600 hours of broadcast audio. In addition several hundred million words of BBC subtitle text was provided for language modelling. A novel aspect of the evaluation was the exploration of speech recognition and speaker diarization in a longitudinal setting -- i.e. recognition of several episodes of the same show, and speaker diarization across these episodes, linking speakers. The longitudinal tasks also offered the opportunity for systems to make use of supplied metadata including show title, genre tag, and date/time of transmission. This paper describes the task data and evaluation process used in the MGB challenge, and summarises the results obtained.},
  year = {2015}
}
@inproceedings{sgangireddy_interspeech15,
  author = {Gangireddy, Siva Reddy and Renals, Steve and Nankaku, Yoshihiko and Lee, Akinobu},
  title = {Prosodically-enhanced Recurrent Neural Network Language Models},
  booktitle = {Proc. Interspeech},
  address = {Dresden, Germany},
  abstract = {Recurrent neural network language models have been shown to consistently reduce the word error rates (WERs) of large vocabulary speech recognition tasks. In this work we propose to enhance the RNNLMs with prosodic features computed using the context of the current word. Since it is plausible to compute the prosody features at the word and syllable level we have trained the models on prosody features computed at both these levels. To investigate the effectiveness of proposed models we report perplexity and WER for two speech recognition tasks, Switchboard and TED. We observed substantial improvements in perplexity and small improvements in WER.},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ispeech15.pdf},
  pages = {2390--2394},
  categories = {RNNLMs, 3-gram, prosody features, pause duration, duration of the word, syllable duration, syllable F0, GMM-HMM, DNN-HMM, Switchboard conversations and TED lectures}
}
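@comment{{The entry above augments a recurrent neural network language model with prosodic features computed per word or syllable. The PyTorch sketch below illustrates the general idea by concatenating a small prosody vector with each word embedding before the recurrent layer; the LSTM, the feature set (pause duration, word duration, mean F0) and all dimensions are assumptions rather than the authors' configuration.

import torch
import torch.nn as nn

class ProsodyRNNLM(nn.Module):
    def __init__(self, vocab_size, emb_dim=256, prosody_dim=3, hidden_dim=512):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        # Prosody vector per word, e.g. pause duration, word duration, mean F0.
        self.rnn = nn.LSTM(emb_dim + prosody_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_size)

    def forward(self, word_ids, prosody):
        # word_ids: (batch, time); prosody: (batch, time, prosody_dim)
        x = torch.cat([self.embed(word_ids), prosody], dim=-1)
        h, _ = self.rnn(x)
        return self.out(h)  # next-word logits at each time step

model = ProsodyRNNLM(vocab_size=10000)
logits = model(torch.randint(0, 10000, (2, 7)), torch.randn(2, 7, 3))
}}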
@inproceedings{watts_blizzard2015,
  author = {Watts, Oliver and Ronanki, Srikanth and Wu, Zhizheng and Raitio, Tuomo and Suni, Antti},
  title = {{The NST--GlottHMM entry to the Blizzard Challenge 2015}},
  booktitle = {Proc. Blizzard Challenge Workshop (Interspeech Satellite)},
  year = {2015},
  month = {September},
  key = {watts_blizzard2015},
  address = {Berlin, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bc2015_nst.pdf},
  abstract = {We describe the synthetic voices forming the joint entry into the 2015 Blizzard Challenge of the Natural Speech Technology consortium, Helsinki University, and Aalto University. The 2015 Blizzard Challenge presents an opportunity to test and benchmark some of the tools we have developed to address the problem of how to produce systems in arbitrary new languages with minimal annotated data and language-specific expertise on the part of the system builders. We here explain how our tools were used to address these problems on the different tasks of the challenge, and provide some discussion of the evaluation results. Some additions to the system used to build voices for the previous Challenge are described: acoustic modelling using deep neural networks with jointly-trained duration model, and an unsupervised approach for handling the phenomenon of inherent vowel deletion which occurs in 3 of the 6 target languages.},
  categories = {statistical parametric speech synthesis, unsupervised learning, vector space model, glottal inverse filtering, deep neural network, glottal flow pulse library, schwa-deletion}
}
@article{POBLETE20151,
  author = {Poblete, Victor and Espic, Felipe and King, Simon and Stern, Richard M. and Huenupan, Fernando and Fredes, Josue and Yoma, Nestor Becerra},
  doi = {10.1016/j.csl.2014.10.006},
  title = {A perceptually-motivated low-complexity instantaneous linear channel normalization technique applied to speaker verification},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230814001053},
  journal = {Computer Speech \& Language},
  issn = {0885-2308},
  number = {1},
  abstract = {This paper proposes a new set of speech features called Locally-Normalized Cepstral Coefficients (LNCC) that are based on Seneff's Generalized Synchrony Detector (GSD). First, an analysis of the GSD frequency response is provided to show that it generates spurious peaks at harmonics of the detected frequency. Then, the GSD frequency response is modeled as a quotient of two filters centered at the detected frequency. The numerator is a triangular band pass filter centered around a particular frequency similar to the ordinary Mel filters. The denominator term is a filter that responds maximally to frequency components on either side of the numerator filter. As a result, a local normalization is performed without the spurious peaks of the original GSD. Speaker verification results demonstrate that the proposed LNCC features are of low computational complexity and far more effectively compensate for spectral tilt than ordinary MFCC coefficients. LNCC features do not require the computation and storage of a moving average of the feature values, and they provide relative reductions in Equal Error Rate (EER) as high as 47.7%, 34.0% or 25.8% when compared with MFCC, MFCC+CMN, or MFCC+RASTA in one case of variable spectral tilt, respectively.},
  volume = {31},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/paper_temp.pdf},
  pages = {1--27},
  categories = {Channel robust feature extraction, Auditory models, Spectral local normalization, Synchrony detection}
}
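@comment{{The entry above models the GSD response as a triangular band-pass numerator divided by a filter responding to frequencies on either side of it. The sketch below computes a locally normalised filterbank cepstrum in that spirit: each band's energy is divided by the energy of its flanking bands, then a DCT yields cepstral coefficients. The filter shapes, the flanking-band definition and the DCT settings are assumptions, not the published LNCC recipe.

import numpy as np
from scipy.fftpack import dct

def local_norm_cepstrum(power_spectrum, fbank, n_ceps=13, eps=1e-10):
    # power_spectrum: (n_fft_bins,); fbank: (n_filters, n_fft_bins)
    # triangular filters, e.g. a Mel filterbank.
    band_energy = fbank @ power_spectrum
    # Denominator: energy captured by the neighbouring (flanking) bands.
    flank = np.empty_like(band_energy)
    flank[1:-1] = band_energy[:-2] + band_energy[2:]
    flank[0] = band_energy[1]
    flank[-1] = band_energy[-2]
    # Local normalisation in the log domain, then cepstral decorrelation.
    normalised = np.log(band_energy + eps) - np.log(flank + eps)
    return dct(normalised, type=2, norm='ortho')[:n_ceps]
}}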
@article{chee-yong2015combining,
  author = {{Chee Yong}, Lau and Watts, Oliver and King, Simon},
  date-modified = {2018-01-19 16:43:57 +0000},
  title = {Combining Lightly-supervised Learning and User Feedback to Construct and Improve a Statistical Parametric Speech Synthesizer for Malay},
  journal = {Research Journal of Applied Sciences, Engineering and Technology},
  issn = {2040-7459},
  number = {11},
  month = dec,
  volume = {11},
  pages = {1227--1232},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/lau2015combining.pdf},
  abstract = {In this study, we aim to reduce the human effort in preparing training data for synthesizing human speech and improve the quality of synthetic speech. In spite of the learning-from-data used to train the statistical models, the construction of a statistical parametric speech synthesizer involves substantial human effort, especially when using imperfect data or working on a new language. Here, we use lightly-supervised methods for preparing the data and constructing the text-processing front end. This initial system is then iteratively improved using active learning in which feedback from users is used to disambiguate the pronunciation system in our chosen language, Malay. The data are prepared using speaker diarisation and lightly-supervised text-speech alignment. In the front end, grapheme-based units are used. The active learning used small amounts of feedback from a listener to train a classifier. We report evaluations of two systems built from high-quality studio data and lower-quality `found' data respectively and show that the intelligibility of each can be improved using active learning.}
}
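@comment{{The entry above iteratively improves the synthesiser's pronunciation module with small amounts of listener feedback. The sketch below shows a generic uncertainty-sampling loop in that spirit: a binary classifier choosing between two candidate pronunciations is retrained as a listener labels the items it is least certain about. The features, the classifier and ask_listener are hypothetical stand-ins, not the paper's setup.

import numpy as np
from sklearn.linear_model import LogisticRegression

def active_learning_loop(features, seed_idx, seed_labels, ask_listener, rounds=10):
    # features: (n_items, n_dims) per-word features; seed_idx/seed_labels must
    # already cover both classes. ask_listener(i) returns the label (0 or 1)
    # a listener prefers for item i.
    labelled_idx, labels = list(seed_idx), list(seed_labels)
    clf = LogisticRegression()
    for _ in range(rounds):
        clf.fit(features[labelled_idx], labels)
        proba = clf.predict_proba(features)[:, 1]
        # Query the item the classifier is least certain about.
        unlabelled = [i for i in range(len(features)) if i not in labelled_idx]
        query = min(unlabelled, key=lambda i: abs(proba[i] - 0.5))
        labelled_idx.append(query)
        labels.append(ask_listener(query))
    return clf
}}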
@inproceedings{watts2015nst,
  author = {Watts, Oliver and Ronanki, Srikanth and Wu, Zhizheng and Raitio, Tuomo and Suni, A.},
  date-modified = {2018-01-19 16:44:49 +0000},
  title = {The NST--GlottHMM entry to the Blizzard Challenge 2015},
  abstract = {We describe the synthetic voices forming the joint entry into the 2015 Blizzard Challenge of the Natural Speech Technology consortium, Helsinki University, and Aalto University. The 2015 Blizzard Challenge presents an opportunity to test and benchmark some of the tools we have developed to address the problem of how to produce systems in arbitrary new languages with minimal annotated data and language-specific expertise on the part of the system builders. We here explain how our tools were used to address these problems on the different tasks of the challenge, and provide some discussion of the evaluation results. Some additions to the system used to build voices for the previous Challenge are described: acoustic modelling using deep neural networks with jointly-trained duration model, and an unsupervised approach for handling the phenomenon of inherent vowel deletion which occurs in 3 of the 6 target languages.},
  month = sep,
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/watts2015nst-glotthmm.pdf},
  booktitle = {Proceedings of Blizzard Challenge 2015}
}
@inproceedings{watts2015sentence-level,
  author = {Watts, Oliver and Wu, Zhizheng and King, Simon},
  publisher = {International Speech Communication Association},
  date-modified = {2018-01-19 16:44:26 +0000},
  title = {Sentence-level control vectors for deep neural network speech synthesis},
  abstract = {This paper describes the use of a low-dimensional vector representation of sentence acoustics to control the output of a feed-forward deep neural network text-to-speech system on a sentence-by-sentence basis. Vector representations for sentences in the training corpus are learned during network training along with other parameters of the model. Although the network is trained on a frame-by-frame basis, the standard frame-level inputs representing linguistic features are supplemented by features from a projection layer which outputs a learned representation of sentence-level acoustic characteristics. The projection layer contains dedicated parameters for each sentence in the training data which are optimised jointly with the standard network weights. Sentence-specific parameters are optimised on all frames of the relevant sentence -- these parameters therefore allow the network to account for sentence-level variation in the data which is not predictable from the standard linguistic inputs. Results show that the global prosodic characteristics of synthetic speech can be controlled simply and robustly at run time by supplementing basic linguistic features with sentence-level control vectors which are novel but designed to be consistent with those observed in the training corpus.},
  month = sep,
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/watts2015sentence-level.pdf},
  booktitle = {INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association},
  pages = {2217--2221}
}
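@comment{{The entry above learns a per-sentence control vector jointly with the acoustic network and concatenates it with frame-level linguistic features. The PyTorch sketch below shows that core mechanism, with the option of supplying a novel control vector at synthesis time; the feed-forward architecture and all dimensions here are assumptions, not the authors' system.

import torch
import torch.nn as nn

class ControlVectorTTS(nn.Module):
    def __init__(self, n_sentences, ling_dim=400, ctrl_dim=8,
                 hidden_dim=1024, acoustic_dim=187):
        super().__init__()
        # One trainable control vector per training sentence (the projection layer).
        self.sentence_embed = nn.Embedding(n_sentences, ctrl_dim)
        self.net = nn.Sequential(
            nn.Linear(ling_dim + ctrl_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, acoustic_dim))

    def forward(self, ling_feats, sentence_ids=None, control=None):
        # ling_feats: (n_frames, ling_dim). During training, look up the learned
        # vector for the sentence; at synthesis time, pass a novel control vector.
        if control is None:
            control = self.sentence_embed(sentence_ids)
        control = control.expand(ling_feats.size(0), -1)
        return self.net(torch.cat([ling_feats, control], dim=-1))
}}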
@inproceedings{kay2015knowledge,
  author = {Kay, Rosie and Watts, Oliver and Barra-Chicote, Roberto and Mayo, Cassie},
  date-modified = {2018-01-19 16:44:10 +0000},
  title = {Knowledge versus data in TTS: evaluation of a continuum of synthesis systems},
  abstract = {Grapheme-based models have been proposed for both ASR and TTS as a way of circumventing the lack of expert-compiled pronunciation lexicons in under-resourced languages. It is a common observation that this should work well in languages employing orthographies with a transparent letter-to-phoneme relationship, such as Spanish. Our experience has shown, however, that there is still a significant difference in intelligibility between grapheme-based systems and conventional ones for this language. This paper explores the contribution of different levels of linguistic annotation to system intelligibility, and the trade-off between those levels and the quantity of data used for training. Ten systems spaced across these two continua of knowledge and data were subjectively evaluated for intelligibility.},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/kay2015knowledge.pdf},
  booktitle = {INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden, Germany, September 6-10, 2015},
  pages = {3335--3339}
}