The Centre for Speech Technology Research, The University of Edinburgh

Publications by Joachim Fainberg

s1043206.bib

@inproceedings{joachim_fainberg_improving_2016,
  author = {Fainberg, Joachim and Bell, Peter and Lincoln, Mike and Renals, Steve},
  title = {Improving Children's Speech Recognition through Out-of-Domain Data Augmentation},
  abstract = {Children’s speech poses challenges to speech recognition due to strong age-dependent anatomical variations and a lack of large, publicly available corpora. In this paper we explore data augmentation for children’s speech recognition using stochastic feature mapping (SFM) to transform out-of-domain adult data for both GMM-based and DNN-based acoustic models. We performed experiments on the English PF-STAR corpus, augmenting using WSJCAM0 and ABI. Our experimental results indicate that a DNN acoustic model for children’s speech can make use of adult data, and that out-of-domain SFM is more accurate than in-domain SFM.},
  address = {San Francisco, USA},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/master.pdf},
  booktitle = {Proc. Interspeech},
  categories = {speech recognition, data augmentation, children’s speech}
}
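The abstract above describes mapping out-of-domain adult features toward the child domain before acoustic model training. A rough illustration of the feature-mapping idea follows; this is a simplified global mean/variance match, not the paper's stochastic feature mapping (which estimates speaker-dependent transforms), and all data and dimensions below are invented:

```python
import numpy as np

def fit_global_stats(feats):
    """Per-dimension mean and standard deviation of a (frames x dims) feature matrix."""
    return feats.mean(axis=0), feats.std(axis=0) + 1e-8

def map_features(src_feats, src_stats, tgt_stats):
    """Shift-and-scale source features so their global statistics match the
    target domain. A crude, global stand-in for per-speaker SFM."""
    src_mu, src_sigma = src_stats
    tgt_mu, tgt_sigma = tgt_stats
    return (src_feats - src_mu) / src_sigma * tgt_sigma + tgt_mu

rng = np.random.default_rng(0)
adult = rng.normal(0.0, 2.0, size=(1000, 13))  # stand-in for adult (e.g. WSJCAM0) MFCCs
child = rng.normal(1.5, 1.0, size=(200, 13))   # stand-in for child (e.g. PF-STAR) MFCCs

augmented = map_features(adult, fit_global_stats(adult), fit_global_stats(child))
# `augmented` can now be pooled with `child` to train a GMM or DNN acoustic model.
print(np.allclose(augmented.mean(axis=0), child.mean(axis=0), atol=1e-6))
```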
@inproceedings{fainberg2017factorised,
  author = {Fainberg, Joachim and Renals, Steve and Bell, Peter},
  title = {Factorised Representations for Neural Network Adaptation to Diverse Acoustic Environments},
  booktitle = {Proc. Interspeech},
  abstract = {Adapting acoustic models jointly to both speaker and environment has been shown to be effective. In many realistic scenarios, however, either the speaker or environment at test time might be unknown, or there may be insufficient data to learn a joint transform. Generating independent speaker and environment transforms improves the match of an acoustic model to unseen combinations. Using i-vectors, we demonstrate that it is possible to factorise speaker or environment information using multi-condition training with neural networks. Specifically, we extract bottleneck features from networks trained to classify either speakers or environments. We perform experiments on the Wall Street Journal corpus combined with environment noise from the Diverse Environments Multichannel Acoustic Noise Database. Using the factorised i-vectors we show improvements in word error rates on perturbed versions of the eval92 and dev93 test sets, both when one factor is missing and when the factors are seen but not in the desired combination.},
  year = {2017},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2017/joachimIS2017.pdf},
  pages = {749--753},
  categories = {speech recognition, adaptation, acoustic factorisation, i-vectors, deep neural networks}
}
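The factorisation in this paper rests on bottleneck features: train a classifier for one factor (speakers or environments) with a narrow hidden layer, then keep that layer's activations as the factor-specific representation. A minimal sketch in PyTorch, assuming invented dimensions (100-dim i-vectors, a 30-dim bottleneck, 50 speaker classes) and random stand-in data in place of real multi-condition i-vectors:

```python
import torch
import torch.nn as nn

IVEC_DIM, BN_DIM, N_SPEAKERS = 100, 30, 50  # hypothetical sizes

class BottleneckClassifier(nn.Module):
    """Classifier with a narrow hidden layer whose activations serve as
    factorised (speaker- or environment-specific) features."""
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(IVEC_DIM, 256), nn.ReLU(),
            nn.Linear(256, BN_DIM), nn.ReLU(),  # the bottleneck layer
        )
        self.head = nn.Linear(BN_DIM, N_SPEAKERS)

    def forward(self, x):
        bn = self.encoder(x)
        return self.head(bn), bn

model = BottleneckClassifier()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

# Toy training step on random data standing in for multi-condition i-vectors.
ivectors = torch.randn(64, IVEC_DIM)
labels = torch.randint(0, N_SPEAKERS, (64,))
logits, _ = model(ivectors)
loss = loss_fn(logits, labels)
opt.zero_grad(); loss.backward(); opt.step()

# After training, the classification head is discarded; the bottleneck
# activations are kept as the factor-specific features.
with torch.no_grad():
    _, speaker_features = model(ivectors)
print(speaker_features.shape)  # torch.Size([64, 30])
```

A separate network trained to classify environments would yield the environment features by the same recipe, giving the two independent transforms the abstract describes.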
@inproceedings{bell17_transcription_correction,
  author = {Bell, Peter and Fainberg, Joachim and Lai, Catherine and Sinclair, Mark},
  title = {A System for Real-Time Collaborative Transcription Correction},
  booktitle = {Proc. Interspeech (demo session)},
  month = aug,
  year = {2017},
  pages = {817--818},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2017/is2017demo_nh_1.pdf},
  abstract = {We present a system to enable efficient, collaborative human correction of ASR transcripts, designed to operate in real-time situations, for example, when post-editing live captions generated for news broadcasts. In the system, confusion networks derived from ASR lattices are used to highlight low-confidence words and present alternatives to the user for quick correction. The system uses a client-server architecture, whereby information about each manual edit is posted to the server. Such information can be used to dynamically update the one-best ASR output for all utterances currently in the editing pipeline. We propose to make updates in three ways: by finding a new one-best path through an existing ASR lattice consistent with the correction received; by identifying further instances of out-of-vocabulary terms entered by the user; and by adapting the language model on the fly. Updates are received asynchronously by the client.},
  categories = {speech recognition, speech transcription, language modelling}
}
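The demo system above revolves around confusion networks: per-slot lists of word alternatives with posteriors, where low-confidence slots are highlighted for the editor. A toy sketch of that interaction loop follows; the words, posteriors, and threshold are invented, and a real system would derive the network from ASR lattices and post each edit to a server:

```python
# A toy confusion network: one list of (word, posterior) alternatives per slot.
cn = [
    [("the", 0.98), ("a", 0.02)],
    [("prime", 0.55), ("crime", 0.40), ("<eps>", 0.05)],
    [("minister", 0.97), ("mister", 0.03)],
]

CONF_THRESHOLD = 0.8  # invented highlighting threshold

def one_best(cn):
    """Current best hypothesis: the top alternative in each slot."""
    return [max(slot, key=lambda wp: wp[1])[0] for slot in cn]

def low_confidence_slots(cn, threshold=CONF_THRESHOLD):
    """Slots whose best posterior falls below the threshold are highlighted
    for the human editor, with their alternatives attached."""
    return [(i, slot) for i, slot in enumerate(cn)
            if max(p for _, p in slot) < threshold]

def apply_correction(cn, slot_index, word):
    """Pin a user-corrected word by giving it the full posterior mass."""
    cn[slot_index] = [(word, 1.0)]

print(one_best(cn))              # ['the', 'prime', 'minister']
print(low_confidence_slots(cn))  # slot 1 falls below the threshold
apply_correction(cn, 1, "crime") # the editor picks an alternative
print(one_best(cn))              # ['the', 'crime', 'minister']
```

In the full system such a correction would also trigger the server-side updates the abstract lists: re-decoding the lattice consistent with the edit, matching further instances of new vocabulary, and adapting the language model on the fly.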