The Centre for Speech Technology Research, The University of Edinburgh

Publications by Herman Kamper

@inproceedings{kamper+etal_icassp15,
  author = {Kamper, Herman and Elsner, Micha and Jansen, Aren and Goldwater, Sharon J.},
  title = {Unsupervised neural network based feature extraction using weak top-down constraints},
  booktitle = {Proc. ICASSP},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/kamper+elsner+jansen+goldwater_icassp2015.pdf},
  abstract = {Deep neural networks (DNNs) have become a standard component in supervised ASR, used in both data-driven feature extraction and acoustic modelling. Supervision is typically obtained from a forced alignment that provides phone class targets, requiring transcriptions and pronunciations. We propose a novel unsupervised DNN-based feature extractor that can be trained without these resources in zero-resource settings. Using unsupervised term discovery, we find pairs of isolated word examples of the same unknown type; these provide weak top-down supervision. For each pair, dynamic programming is used to align the feature frames of the two words. Matching frames are presented as input-output pairs to a deep autoencoder (AE) neural network. Using this AE as feature extractor in a word discrimination task, we achieve 64% relative improvement over a previous state-of-the-art system, 57% improvement relative to a bottom-up trained deep AE, and come to within 23% of a supervised system.},
  categories = {unsupervised feature extraction, deep neural networks, zero-resource speech processing, top-down constraints}
}
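
The pairing-and-alignment step described in this abstract lends itself to a short illustration. The sketch below (plain Python with NumPy; not the authors' code) shows how two word tokens of the same discovered type can be aligned frame-by-frame with dynamic time warping, yielding the input-output frame pairs that would train the correspondence autoencoder. The feature arrays, dimensions, and Euclidean frame cost are illustrative assumptions.

import numpy as np

def dtw_alignment(x, y):
    """Return the DTW path between two sequences of feature frames.

    x: (n, d) array, y: (m, d) array. Cost is Euclidean frame distance.
    """
    n, m = len(x), len(y)
    cost = np.full((n + 1, m + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = np.linalg.norm(x[i - 1] - y[j - 1])
            cost[i, j] = d + min(cost[i - 1, j],      # step down
                                 cost[i, j - 1],      # step right
                                 cost[i - 1, j - 1])  # diagonal match
    # Backtrack from (n, m) to recover the alignment path.
    path, i, j = [], n, m
    while i > 0 and j > 0:
        path.append((i - 1, j - 1))
        step = np.argmin([cost[i - 1, j - 1], cost[i - 1, j], cost[i, j - 1]])
        if step == 0:
            i, j = i - 1, j - 1
        elif step == 1:
            i -= 1
        else:
            j -= 1
    return path[::-1]

# Two hypothetical tokens of the same discovered word type (e.g. MFCC frames).
rng = np.random.default_rng(0)
word_a = rng.normal(size=(60, 13))   # 60 frames of 13-dim features
word_b = rng.normal(size=(75, 13))   # a longer token of the same type

# Matched frames become (input, target) pairs for correspondence training.
pairs = [(word_a[i], word_b[j]) for i, j in dtw_alignment(word_a, word_b)]
print(f"{len(pairs)} frame pairs for autoencoder training")

Each aligned pair gives the network an input frame and a target frame drawn from different tokens of the same word, which is what makes the supervision "weak top-down" rather than bottom-up reconstruction.
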
@inproceedings{kamper+etal_interspeech15,
  author = {Kamper, Herman and Goldwater, Sharon J. and Jansen, Aren},
  title = {Fully unsupervised small-vocabulary speech recognition using a segmental {B}ayesian model},
  booktitle = {Proc. Interspeech},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/interspeech15-unsupTIDigits.pdf},
  abstract = {Current supervised speech technology relies heavily on transcribed speech and pronunciation dictionaries. In settings where unlabelled speech data alone is available, unsupervised methods are required to discover categorical linguistic structure directly from the audio. We present a novel Bayesian model which segments unlabelled input speech into word-like units, resulting in a complete unsupervised transcription of the speech in terms of discovered word types. In our approach, a potential word segment (of arbitrary length) is embedded in a fixed-dimensional space; the model (implemented as a Gibbs sampler) then builds a whole-word acoustic model in this space while jointly doing segmentation. We report word error rates in a connected digit recognition task by mapping the unsupervised output to ground truth transcriptions. Our model outperforms a previously developed HMM-based system, even when the model is not constrained to discover only the 11 word types present in the data.},
  categories = {unsupervised speech processing, word discovery, speech segmentation, unsupervised learning, segmental models}
}
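
The embedding step described in this abstract can be illustrated concretely. The sketch below (Python with NumPy; not the paper's implementation) uses uniform downsampling, one simple way from this line of work to map a variable-length segment to a fixed-dimensional vector: sample k frames at evenly spaced positions and flatten. The choice of k and the feature dimensionality are illustrative assumptions.

import numpy as np

def downsample_embedding(segment, k=10):
    """Embed an (n, d) segment as a k*d vector by uniform frame sampling."""
    n, d = segment.shape
    idx = np.linspace(0, n - 1, k).round().astype(int)  # k evenly spaced frames
    return segment[idx].reshape(k * d)

rng = np.random.default_rng(0)
segment = rng.normal(size=(47, 13))          # 47 frames of 13-dim features
embedding = downsample_embedding(segment)    # fixed 130-dim vector
print(embedding.shape)                       # (130,)

Because every candidate segment maps to a vector of the same dimensionality, the whole-word acoustic model can be defined directly in this space, regardless of segment duration.
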
@inproceedings{kamper+etal_slt14,
  author = {Kamper, Herman and Jansen, Aren and King, Simon and Goldwater, Sharon J.},
  title = {Unsupervised lexical clustering of speech segments using fixed-dimensional acoustic embeddings},
  booktitle = {Proc. SLT},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/kamper+jansen+king+goldwater_slt2014.pdf},
  abstract = {Unsupervised speech processing methods are essential for applications ranging from zero-resource speech technology to modelling child language acquisition. One challenging problem is discovering the word inventory of the language: the lexicon. Lexical clustering is the task of grouping unlabelled acoustic word tokens according to type. We propose a novel lexical clustering model: variable-length word segments are embedded in a fixed-dimensional acoustic space in which clustering is then performed. We evaluate several clustering algorithms and find that the best methods produce clusters with wide variation in sizes, as observed in natural language. The best probabilistic approach is an infinite Gaussian mixture model (IGMM), which automatically chooses the number of clusters. Performance is comparable to that of non-probabilistic Chinese Whispers and average-linkage hierarchical clustering. We conclude that IGMM clustering of fixed-dimensional embeddings holds promise as the lexical clustering component in unsupervised speech processing systems.},
  categories = {lexical clustering, unsupervised learning, fixed-dimensional embeddings, lexical discovery}
}
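
The clustering component described in this abstract can be approximated with off-the-shelf tools. The sketch below (Python with scikit-learn; not the paper's code) stands in for the infinite Gaussian mixture model with a BayesianGaussianMixture under a truncated Dirichlet process prior, which likewise infers an effective number of clusters from the data. The embeddings are synthetic stand-ins, not acoustic data.

import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.default_rng(0)
# Synthetic stand-in for embedded word tokens: 3 "types" in a 10-dim space.
embeddings = np.vstack([
    rng.normal(loc=c, scale=0.3, size=(40, 10)) for c in (-2.0, 0.0, 2.0)
])

# Truncated Dirichlet process mixture: surplus components get near-zero
# weight, so the effective number of clusters is chosen automatically.
igmm = BayesianGaussianMixture(
    n_components=20,                                  # truncation level
    weight_concentration_prior_type="dirichlet_process",
    covariance_type="diag",
    max_iter=500,
    random_state=0,
)
labels = igmm.fit_predict(embeddings)
print("clusters discovered:", len(np.unique(labels)))

As in the paper's IGMM, the number of word types does not have to be fixed in advance; components with negligible posterior weight are simply never assigned any tokens.
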