% NOTE(review): removed an extraction artifact here (a run of sequential page/line
% numbers and a stray "|" that were not part of any BibTeX entry).
@misc{zeyer2021does,
  title         = {Why does {CTC} result in peaky behavior?},
  author        = {Albert Zeyer and Ralf Schl{\"u}ter and Hermann Ney},
  year          = {2021},
  eprint        = {2105.14849},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG}
}
% NOTE(review): this entry describes the same work (arXiv:1802.08435) as
% `kalchbrenner2018efficient` later in this file — consider consolidating
% citations onto a single key.
@article{wavernn,
author = {Nal Kalchbrenner and
Erich Elsen and
Karen Simonyan and
Seb Noury and
Norman Casagrande and
Edward Lockhart and
Florian Stimberg and
A{\"{a}}ron van den Oord and
Sander Dieleman and
Koray Kavukcuoglu},
title = {Efficient Neural Audio Synthesis},
journal = {CoRR},
volume = {abs/1802.08435},
year = {2018},
url = {http://arxiv.org/abs/1802.08435},
eprinttype = {arXiv},
eprint = {1802.08435},
timestamp = {Mon, 13 Aug 2018 16:47:01 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1802-08435.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{RESAMPLE,
  author = {Julius O. Smith},
  title  = {Digital Audio Resampling Home Page, ``{Theory} of Ideal Bandlimited Interpolation'' section},
  url    = {https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html},
  month  = sep,
  year   = {2020}
}
@article{voxpopuli,
  author     = {Changhan Wang and Morgane Rivi{\`{e}}re and Ann Lee and Anne Wu and Chaitanya Talnikar and Daniel Haziza and Mary Williamson and Juan Miguel Pino and Emmanuel Dupoux},
  title      = {VoxPopuli: {A} Large-Scale Multilingual Speech Corpus for Representation Learning, Semi-Supervised Learning and Interpretation},
  journal    = {CoRR},
  volume     = {abs/2101.00390},
  year       = {2021},
  url        = {https://arxiv.org/abs/2101.00390},
  eprinttype = {arXiv},
  eprint     = {2101.00390},
  timestamp  = {Thu, 12 Aug 2021 15:37:06 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2101-00390.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{specaugment,
  title     = {{SpecAugment}: A Simple Data Augmentation Method for Automatic Speech Recognition},
  author    = {Park, Daniel S. and Chan, William and Zhang, Yu and Chiu, Chung-Cheng and Zoph, Barret and Cubuk, Ekin D. and Le, Quoc V.},
  booktitle = {Proc. Interspeech 2019},
  publisher = {ISCA},
  year      = {2019},
  month     = sep,
  doi       = {10.21437/Interspeech.2019-2680}
}
@misc{ljspeech17,
  author       = {Keith Ito and Linda Johnson},
  title        = {The LJ Speech Dataset},
  howpublished = {\url{https://keithito.com/LJ-Speech-Dataset/}},
  year         = {2017}
}
@misc{conneau2020unsupervised,
  title         = {Unsupervised Cross-lingual Representation Learning for Speech Recognition},
  author        = {Alexis Conneau and Alexei Baevski and Ronan Collobert and Abdelrahman Mohamed and Michael Auli},
  year          = {2020},
  eprint        = {2006.13979},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@inproceedings{Gales2014SpeechRA,
  title     = {Speech recognition and keyword spotting for low-resource languages: Babel project research at CUED},
  author    = {Mark John Francis Gales and Kate Knill and Anton Ragni and Shakti Prasad Rath},
  booktitle = {SLTU},
  year      = {2014}
}
@misc{ardila2020common,
  title         = {Common Voice: A Massively-Multilingual Speech Corpus},
  author        = {Rosana Ardila and Megan Branson and Kelly Davis and Michael Henretty and Michael Kohler and Josh Meyer and Reuben Morais and Lindsay Saunders and Francis M. Tyers and Gregor Weber},
  year          = {2020},
  eprint        = {1912.06670},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@inproceedings{Pratap_2020,
  title     = {{MLS}: A Large-Scale Multilingual Dataset for Speech Research},
  author    = {Pratap, Vineel and Xu, Qiantong and Sriram, Anuroop and Synnaeve, Gabriel and Collobert, Ronan},
  booktitle = {Proc. Interspeech 2020},
  publisher = {ISCA},
  year      = {2020},
  month     = oct,
  doi       = {10.21437/Interspeech.2020-2826}
}
@inproceedings{librilight,
  author    = {J. {Kahn} and M. {Rivière} and W. {Zheng} and E. {Kharitonov} and Q. {Xu} and P. E. {Mazaré} and J. {Karadayi} and V. {Liptchinsky} and R. {Collobert} and C. {Fuegen} and T. {Likhomanenko} and G. {Synnaeve} and A. {Joulin} and A. {Mohamed} and E. {Dupoux}},
  booktitle = {ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {Libri-Light: A Benchmark for ASR with Limited or No Supervision},
  year      = {2020},
  pages     = {7669-7673},
  note      = {\url{https://github.com/facebookresearch/libri-light}}
}
@inproceedings{7178964,
  author    = {Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle = {2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {Librispeech: An {ASR} corpus based on public domain audio books},
  year      = {2015},
  pages     = {5206--5210},
  doi       = {10.1109/ICASSP.2015.7178964}
}
@inproceedings{ott2019fairseq,
  title     = {fairseq: A Fast, Extensible Toolkit for Sequence Modeling},
  author    = {Myle Ott and Sergey Edunov and Alexei Baevski and Angela Fan and Sam Gross and Nathan Ng and David Grangier and Michael Auli},
  booktitle = {Proceedings of NAACL-HLT 2019: Demonstrations},
  year      = {2019},
}
@misc{baevski2020wav2vec,
  title         = {wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations},
  author        = {Alexei Baevski and Henry Zhou and Abdelrahman Mohamed and Michael Auli},
  year          = {2020},
  eprint        = {2006.11477},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@misc{hsu2021hubert,
  title         = {HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units},
  author        = {Wei-Ning Hsu and Benjamin Bolte and Yao-Hung Hubert Tsai and Kushal Lakhotia and Ruslan Salakhutdinov and Abdelrahman Mohamed},
  year          = {2021},
  eprint        = {2106.07447},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@misc{hannun2014deep,
  title         = {Deep Speech: Scaling up end-to-end speech recognition},
  author        = {Awni Hannun and Carl Case and Jared Casper and Bryan Catanzaro and Greg Diamos and Erich Elsen and Ryan Prenger and Sanjeev Satheesh and Shubho Sengupta and Adam Coates and Andrew Y. Ng},
  year          = {2014},
  eprint        = {1412.5567},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@misc{graves2012sequence,
  title         = {Sequence Transduction with Recurrent Neural Networks},
  author        = {Alex Graves},
  year          = {2012},
  eprint        = {1211.3711},
  archivePrefix = {arXiv},
  primaryClass  = {cs.NE}
}
@misc{collobert2016wav2letter,
  title         = {Wav2Letter: an End-to-End ConvNet-based Speech Recognition System},
  author        = {Ronan Collobert and Christian Puhrsch and Gabriel Synnaeve},
  year          = {2016},
  eprint        = {1609.03193},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG}
}
% NOTE(review): duplicate of the `wavernn` entry earlier in this file (same work,
% arXiv:1802.08435) — consider consolidating citations onto a single key.
@misc{kalchbrenner2018efficient,
title={Efficient Neural Audio Synthesis},
author={Nal Kalchbrenner and Erich Elsen and Karen Simonyan and Seb Noury and Norman Casagrande and Edward Lockhart and Florian Stimberg and Aaron van den Oord and Sander Dieleman and Koray Kavukcuoglu},
year={2018},
eprint={1802.08435},
archivePrefix={arXiv},
primaryClass={cs.SD}
}
@misc{gulati2020conformer,
  title         = {Conformer: Convolution-augmented Transformer for Speech Recognition},
  author        = {Anmol Gulati and James Qin and Chung-Cheng Chiu and Niki Parmar and Yu Zhang and Jiahui Yu and Wei Han and Shibo Wang and Zhengdong Zhang and Yonghui Wu and Ruoming Pang},
  year          = {2020},
  eprint        = {2005.08100},
  archivePrefix = {arXiv},
  primaryClass  = {eess.AS}
}
@article{Luo_2019,
  title     = {{Conv-TasNet}: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation},
  author    = {Luo, Yi and Mesgarani, Nima},
  journal   = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
  volume    = {27},
  number    = {8},
  pages     = {1256--1266},
  year      = {2019},
  month     = aug,
  issn      = {2329-9304},
  doi       = {10.1109/TASLP.2019.2915167}
}
@inproceedings{brian_mcfee-proc-scipy-2015,
  author    = {McFee, Brian and Raffel, Colin and Liang, Dawen and Ellis, Daniel P. W. and McVicar, Matt and Battenberg, Eric and Nieto, Oriol},
  title     = {{librosa}: {Audio} and {Music} {Signal} {Analysis} in {Python}},
  booktitle = {Proceedings of the 14th {Python} in Science Conference},
  pages     = {18--24},
  year      = {2015},
  editor    = {Huff, Kathryn and Bergstra, James},
  doi       = {10.25080/Majora-7b98e3ed-003}
}
@inproceedings{6701851,
  author    = {Perraudin, Nathana{\"e}l and Balazs, Peter and S{\o}ndergaard, Peter L.},
  booktitle = {2013 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
  title     = {A fast {Griffin-Lim} algorithm},
  year      = {2013},
  pages     = {1--4},
  doi       = {10.1109/WASPAA.2013.6701851}
}
@inproceedings{1172092,
  author    = {Griffin, D. and Lim, Jae},
  booktitle = {ICASSP '83. IEEE International Conference on Acoustics, Speech, and Signal Processing},
  title     = {Signal estimation from modified short-time {Fourier} transform},
  year      = {1983},
  volume    = {8},
  pages     = {804--807},
  doi       = {10.1109/ICASSP.1983.1172092}
}
@inproceedings{6854049,
  author    = {Ghahremani, Pegah and BabaAli, Bagher and Povey, Daniel and Riedhammer, Korbinian and Trmal, Jan and Khudanpur, Sanjeev},
  booktitle = {2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {A pitch extraction algorithm tuned for automatic speech recognition},
  year      = {2014},
  pages     = {2494--2498},
  doi       = {10.1109/ICASSP.2014.6854049}
}
@inproceedings{shen2018natural,
  title        = {Natural {TTS} Synthesis by Conditioning {WaveNet} on Mel Spectrogram Predictions},
  author       = {Shen, Jonathan and Pang, Ruoming and Weiss, Ron J. and Schuster, Mike and Jaitly, Navdeep and Yang, Zongheng and Chen, Zhifeng and Zhang, Yu and Wang, Yuxuan and Skerry-Ryan, RJ and others},
  booktitle    = {2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages        = {4779--4783},
  year         = {2018},
  organization = {IEEE}
}
@article{souden2009optimal,
  title     = {On optimal frequency-domain multichannel linear filtering for noise reduction},
  author    = {Souden, Mehrez and Benesty, Jacob and Affes, Sofiene},
  journal   = {IEEE Transactions on Audio, Speech, and Language Processing},
  volume    = {18},
  number    = {2},
  pages     = {260--276},
  year      = {2009},
  publisher = {IEEE}
}
@inproceedings{higuchi2016robust,
  title        = {Robust MVDR beamforming using time-frequency masks for online/offline ASR in noise},
  author       = {Higuchi, Takuya and Ito, Nobutaka and Yoshioka, Takuya and Nakatani, Tomohiro},
  booktitle    = {2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages        = {5210--5214},
  year         = {2016},
  organization = {IEEE}
}
@inproceedings{shi2021emformer,
  title     = {Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency Streaming Speech Recognition},
  author    = {Shi, Yangyang and Wang, Yongqiang and Wu, Chunyang and Yeh, Ching-Feng and Chan, Julian and Zhang, Frank and Le, Duc and Seltzer, Mike},
  booktitle = {ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {6783--6787},
  year      = {2021}
}
@inproceedings{9747706,
  author    = {Shi, Yangyang and Wu, Chunyang and Wang, Dilin and Xiao, Alex and Mahadeokar, Jay and Zhang, Xiaohui and Liu, Chunxi and Li, Ke and Shangguan, Yuan and Nagaraja, Varun and Kalinli, Ozlem and Seltzer, Mike},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {Streaming Transformer Transducer based Speech Recognition Using Non-Causal Convolution},
  year      = {2022},
  pages     = {8277--8281},
  doi       = {10.1109/ICASSP43922.2022.9747706}
}
@article{mises1929praktische,
  title     = {Praktische Verfahren der Gleichungsaufl{\"o}sung},
  author    = {von Mises, Richard and Pollaczek-Geiringer, Hilda},
  journal   = {ZAMM-Journal of Applied Mathematics and Mechanics/Zeitschrift f{\"u}r Angewandte Mathematik und Mechanik},
  volume    = {9},
  number    = {1},
  pages     = {58--77},
  year      = {1929},
  publisher = {Wiley Online Library}
}
@article{higuchi2017online,
  title     = {Online MVDR beamformer based on complex Gaussian mixture model with spatial prior for noise robust ASR},
  author    = {Higuchi, Takuya and Ito, Nobutaka and Araki, Shoko and Yoshioka, Takuya and Delcroix, Marc and Nakatani, Tomohiro},
  journal   = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  volume    = {25},
  number    = {4},
  pages     = {780--793},
  year      = {2017},
  publisher = {IEEE}
}
@article{capon1969high,
  title     = {High-resolution frequency-wavenumber spectrum analysis},
  author    = {Capon, Jack},
  journal   = {Proceedings of the IEEE},
  volume    = {57},
  number    = {8},
  pages     = {1408--1418},
  year      = {1969},
  publisher = {IEEE}
}
@article{kahn2022flashlight,
  title   = {Flashlight: Enabling Innovation in Tools for Machine Learning},
  author  = {Kahn, Jacob and Pratap, Vineel and Likhomanenko, Tatiana and Xu, Qiantong and Hannun, Awni and Cai, Jeff and Tomasello, Paden and Lee, Ann and Grave, Edouard and Avidov, Gilad and others},
  journal = {arXiv preprint arXiv:2201.12465},
  year    = {2022}
}
% NOTE(review): original entry listed a third author "Ver Ver" (an auto-export
% artifact) and an empty `institution`, which @techreport requires.
@techreport{Kominek03cmuarctic,
  author      = {Kominek, John and Black, Alan W.},
  title       = {{CMU} {Arctic} Databases for Speech Synthesis},
  institution = {Carnegie Mellon University, Language Technologies Institute},
  year        = {2003}
}
@misc{cosentino2020librimix,
  title         = {LibriMix: An Open-Source Dataset for Generalizable Speech Separation},
  author        = {Joris Cosentino and Manuel Pariente and Samuele Cornell and Antoine Deleforge and Emmanuel Vincent},
  year          = {2020},
  eprint        = {2005.11262},
  archivePrefix = {arXiv},
  primaryClass  = {eess.AS}
}
@article{Zen2019LibriTTSAC,
  title   = {{LibriTTS}: A Corpus Derived from {LibriSpeech} for Text-to-Speech},
  author  = {Heiga Zen and Viet-Trung Dang and Robert A. J. Clark and Yu Zhang and Ron J. Weiss and Ye Jia and Zhifeng Chen and Yonghui Wu},
  journal = {ArXiv},
  year    = {2019},
  volume  = {abs/1904.02882}
}
@article{speechcommandsv2,
  author        = {Warden, Pete},
  title         = {Speech Commands: {A} Dataset for Limited-Vocabulary Speech Recognition},
  journal       = {ArXiv e-prints},
  archivePrefix = {arXiv},
  eprint        = {1804.03209},
  primaryClass  = {cs.CL},
  keywords      = {Computer Science - Computation and Language, Computer Science - Human-Computer Interaction},
  year          = {2018},
  month         = apr,
  url           = {https://arxiv.org/abs/1804.03209},
}
@inproceedings{rousseau2012tedlium,
  title     = {{TED-LIUM}: an Automatic Speech Recognition dedicated corpus},
  author    = {Rousseau, Anthony and Del{\'e}glise, Paul and Est{\`e}ve, Yannick},
  booktitle = {Conference on Language Resources and Evaluation (LREC)},
  pages     = {125--129},
  year      = {2012}
}
@misc{yamagishi2019vctk,
  author    = {Yamagishi, Junichi and Veaux, Christophe and MacDonald, Kirsten},
  title     = { {CSTR VCTK Corpus}: English Multi-speaker Corpus for {CSTR} Voice Cloning Toolkit (version 0.92)},
  publisher = {University of Edinburgh. The Centre for Speech Technology Research (CSTR)},
  year      = {2019},
  doi       = {10.7488/ds/2645},
}
@misc{Sarfjoo2018DeviceRV,
  title  = {Device Recorded {VCTK} (Small subset version)},
  author = {Seyyed Saeed Sarfjoo and Junichi Yamagishi},
  year   = {2018}
}
@misc{tzanetakis_essl_cook_2001,
  author    = {Tzanetakis, George and Essl, Georg and Cook, Perry},
  title     = {Automatic Musical Genre Classification Of Audio Signals},
  url       = {http://ismir2001.ismir.net/pdf/tzanetakis.pdf},
  publisher = {The International Society for Music Information Retrieval},
  year      = {2001}
}
@inproceedings{Mir2015QUESST2014EQ,
  title     = {{QUESST2014}: Evaluating Query-by-Example Speech Search in a zero-resource setting with real-life queries},
  author    = {Xavier Anguera Miro and Luis Javier Rodriguez-Fuentes and Andi Buzo and Florian Metze and Igor Szoke and Mikel Pe{\~n}agarikano},
  booktitle = {2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year      = {2015},
  pages     = {5833--5837}
}
@misc{cmudict,
  title  = {The {Carnegie Mellon} Pronouncing Dictionary},
  author = {Weide, R. L.},
  year   = {1998},
  url    = {http://www.speech.cs.cmu.edu/cgi-bin/cmudict},
}
@misc{YesNo,
  title = {YesNo},
  url   = {http://www.openslr.org/1/}
}
@misc{MUSDB18HQ,
  author = {Rafii, Zafar and Liutkus, Antoine and St{\"o}ter, Fabian-Robert and Mimilakis, Stylianos Ioannis and Bittner, Rachel},
  title  = {{MUSDB18-HQ} - an uncompressed version of {MUSDB18}},
  month  = dec,
  year   = {2019},
  doi    = {10.5281/zenodo.3338373},
  url    = {https://doi.org/10.5281/zenodo.3338373}
}
@inproceedings{fluent,
  author    = {Loren Lugosch and Mirco Ravanelli and Patrick Ignoto and Vikrant Singh Tomar and Yoshua Bengio},
  editor    = {Gernot Kubin and Zdravko Kacic},
  title     = {Speech Model Pre-Training for End-to-End Spoken Language Understanding},
  booktitle = {Proc. of Interspeech},
  pages     = {814--818},
  year      = {2019},
}
@article{nagrani2017voxceleb,
  title   = {{VoxCeleb}: a large-scale speaker identification dataset},
  author  = {Nagrani, Arsha and Chung, Joon Son and Zisserman, Andrew},
  journal = {arXiv preprint arXiv:1706.08612},
  year    = {2017}
}
@inproceedings{defossez2021hybrid,
  title     = {Hybrid Spectrogram and Waveform Source Separation},
  author    = {D{\'e}fossez, Alexandre},
  booktitle = {Proceedings of the ISMIR 2021 Workshop on Music Source Separation},
  year      = {2021}
}
@article{CATTONI2021101155,
  title    = {{MuST-C}: A multilingual corpus for end-to-end speech translation},
  journal  = {Computer Speech \& Language},
  volume   = {66},
  pages    = {101155},
  year     = {2021},
  issn     = {0885-2308},
  doi      = {10.1016/j.csl.2020.101155},
  url      = {https://www.sciencedirect.com/science/article/pii/S0885230820300887},
  author   = {Roldano Cattoni and Mattia Antonino {Di Gangi} and Luisa Bentivogli and Matteo Negri and Marco Turchi},
  keywords = {Spoken language translation, Multilingual corpus},
  abstract = {End-to-end spoken language translation (SLT) has recently gained popularity thanks to the advancement of sequence to sequence learning in its two parent tasks: automatic speech recognition (ASR) and machine translation (MT). However, research in the field has to confront with the scarcity of publicly available corpora to train data-hungry neural networks. Indeed, while traditional cascade solutions can build on sizable ASR and MT training data for a variety of languages, the available SLT corpora suitable for end-to-end training are few, typically small and of limited language coverage. We contribute to fill this gap by presenting MuST-C, a large and freely available Multilingual Speech Translation Corpus built from English TED Talks. Its unique features include: i) language coverage and diversity (from English into 14 languages from different families), ii) size (at least 237 hours of transcribed recordings per language, 430 on average), iii) variety of topics and speakers, and iv) data quality. Besides describing the corpus creation methodology and discussing the outcomes of empirical and manual quality evaluations, we present baseline results computed with strong systems on each language direction covered by MuST-C.}
}
@article{iemocap,
  author  = {Busso, Carlos and Bulut, Murtaza and Lee, Chi-Chun and Kazemzadeh, Abe and Mower Provost, Emily and Kim, Samuel and Chang, Jeannette and Lee, Sungbok and Narayanan, Shrikanth},
  title   = {{IEMOCAP}: Interactive emotional dyadic motion capture database},
  journal = {Language Resources and Evaluation},
  volume  = {42},
  pages   = {335--359},
  year    = {2008},
  month   = dec,
  doi     = {10.1007/s10579-008-9076-6}
}
@article{coucke2018snips,
  title   = {Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces},
  author  = {Coucke, Alice and Saade, Alaa and Ball, Adrien and Bluche, Th{\'e}odore and Caulier, Alexandre and Leroy, David and Doumouro, Cl{\'e}ment and Gisselbrecht, Thibault and Caltagirone, Francesco and Lavril, Thibaut and others},
  journal = {arXiv preprint arXiv:1805.10190},
  year    = {2018}
}
@inproceedings{9746490,
  author    = {Srivastava, Sangeeta and Wang, Yun and Tjandra, Andros and Kumar, Anurag and Liu, Chunxi and Singh, Kritika and Saraf, Yatharth},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title     = {Conformer-Based Self-Supervised Learning For Non-Speech Audio Tasks},
  year      = {2022},
  pages     = {8862--8866},
  doi       = {10.1109/ICASSP43922.2022.9746490}
}
@article{chen2022wavlm,
  title     = {{WavLM}: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing},
  author    = {Chen, Sanyuan and Wang, Chengyi and Chen, Zhengyang and Wu, Yu and Liu, Shujie and Chen, Zhuo and Li, Jinyu and Kanda, Naoyuki and Yoshioka, Takuya and Xiao, Xiong and others},
  journal   = {IEEE Journal of Selected Topics in Signal Processing},
  volume    = {16},
  number    = {6},
  pages     = {1505--1518},
  year      = {2022},
  publisher = {IEEE}
}
@inproceedings{GigaSpeech2021,
  title     = {{GigaSpeech}: An Evolving, Multi-domain {ASR} Corpus with 10,000 Hours of Transcribed Audio},
  booktitle = {Proc. Interspeech 2021},
  year      = {2021},
  author    = {Guoguo Chen and Shuzhou Chai and Guanbo Wang and Jiayu Du and Wei-Qiang Zhang and Chao Weng and Dan Su and Daniel Povey and Jan Trmal and Junbo Zhang and Mingjie Jin and Sanjeev Khudanpur and Shinji Watanabe and Shuaijiang Zhao and Wei Zou and Xiangang Li and Xuchen Yao and Yongqing Wang and Yujun Wang and Zhao You and Zhiyong Yan}
}
@inproceedings{NEURIPS2020_c5d73680,
  author    = {Kong, Jungil and Kim, Jaehyeon and Bae, Jaekyoung},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. Lin},
  pages     = {17022--17033},
  publisher = {Curran Associates, Inc.},
  title     = {HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis},
  url       = {https://proceedings.neurips.cc/paper/2020/file/c5d736809766d46260d816d8dbc9eb44-Paper.pdf},
  volume    = {33},
  year      = {2020}
}
@inproceedings{ko15_interspeech,
  author    = {Tom Ko and Vijayaditya Peddinti and Daniel Povey and Sanjeev Khudanpur},
  title     = {Audio augmentation for speech recognition},
  year      = {2015},
  booktitle = {Proc. Interspeech 2015},
  pages     = {3586--3589},
  doi       = {10.21437/Interspeech.2015-711}
}
@misc{musan2015,
  author        = {David Snyder and Guoguo Chen and Daniel Povey},
  title         = {{MUSAN}: {A} {Music}, {Speech}, and {Noise} {Corpus}},
  year          = {2015},
  eprint        = {1510.08484},
  archivePrefix = {arXiv},
  note          = {arXiv:1510.08484v1}
}
@article{babu2021xls,
  title   = {{XLS-R}: Self-supervised cross-lingual speech representation learning at scale},
  author  = {Babu, Arun and Wang, Changhan and Tjandra, Andros and Lakhotia, Kushal and Xu, Qiantong and Goyal, Naman and Singh, Kritika and von Platen, Patrick and Saraf, Yatharth and Pino, Juan and others},
  journal = {arXiv preprint arXiv:2111.09296},
  year    = {2021}
}
@inproceedings{valk2021voxlingua107,
  title        = {{VoxLingua107}: a dataset for spoken language recognition},
  author       = {Valk, J{\"o}rgen and Alum{\"a}e, Tanel},
  booktitle    = {2021 IEEE Spoken Language Technology Workshop (SLT)},
  pages        = {652--658},
  year         = {2021},
  organization = {IEEE}
}
@inproceedings{scheibler2018pyroomacoustics,
  title        = {{Pyroomacoustics}: A {Python} package for audio room simulation and array processing algorithms},
  author       = {Scheibler, Robin and Bezzam, Eric and Dokmani{\'c}, Ivan},
  booktitle    = {2018 IEEE international conference on acoustics, speech and signal processing (ICASSP)},
  pages        = {351--355},
  year         = {2018},
  organization = {IEEE}
}
@article{allen1979image,
  title     = {Image method for efficiently simulating small-room acoustics},
  author    = {Allen, Jont B and Berkley, David A},
  journal   = {The Journal of the Acoustical Society of America},
  volume    = {65},
  number    = {4},
  pages     = {943--950},
  year      = {1979},
  publisher = {Acoustical Society of America}
}
@misc{wiki:Absorption_(acoustics),
  author = {{Wikipedia contributors}},
  title  = {Absorption (acoustics) --- {W}ikipedia{,} The Free Encyclopedia},
  url    = {https://en.wikipedia.org/wiki/Absorption_(acoustics)},
  note   = {[Online]}
}
@article{reddy2020interspeech,
  title   = {The interspeech 2020 deep noise suppression challenge: Datasets, subjective testing framework, and challenge results},
  author  = {Reddy, Chandan KA and Gopal, Vishak and Cutler, Ross and Beyrami, Ebrahim and Cheng, Roger and Dubey, Harishchandra and Matusevych, Sergiy and Aichner, Robert and Aazami, Ashkan and Braun, Sebastian and others},
  journal = {arXiv preprint arXiv:2005.13981},
  year    = {2020}
}
@article{manocha2022speech,
  title   = {Speech quality assessment through {MOS} using non-matching references},
  author  = {Manocha, Pranay and Kumar, Anurag},
  journal = {arXiv preprint arXiv:2206.12285},
  year    = {2022}
}
@article{cooper2021voices,
  title   = {How do voices from past speech synthesis challenges compare today?},
  author  = {Cooper, Erica and Yamagishi, Junichi},
  journal = {arXiv preprint arXiv:2105.02373},
  year    = {2021}
}
@article{mysore2014can,
  title     = {Can we automatically transform speech recorded on common consumer devices in real-world environments into professional production quality speech?---a dataset, insights, and challenges},
  author    = {Mysore, Gautham J},
  journal   = {IEEE Signal Processing Letters},
  volume    = {22},
  number    = {8},
  pages     = {1006--1010},
  year      = {2014},
  publisher = {IEEE}
}
@article{kumar2023torchaudio,
  title   = {{TorchAudio-Squim}: Reference-less Speech Quality and Intelligibility measures in {TorchAudio}},
  author  = {Kumar, Anurag and Tan, Ke and Ni, Zhaoheng and Manocha, Pranay and Zhang, Xiaohui and Henderson, Ethan and Xu, Buye},
  journal = {arXiv preprint arXiv:2304.01448},
  year    = {2023}
}
@inproceedings{45611,
  title     = {{CNN} Architectures for Large-Scale Audio Classification},
  author    = {Shawn Hershey and Sourish Chaudhuri and Daniel P. W. Ellis and Jort F. Gemmeke and Aren Jansen and Channing Moore and Manoj Plakal and Devin Platt and Rif A. Saurous and Bryan Seybold and Malcolm Slaney and Ron Weiss and Kevin Wilson},
  year      = {2017},
  url       = {https://arxiv.org/abs/1609.09430},
  booktitle = {International Conference on Acoustics, Speech and Signal Processing (ICASSP)}
}
@misc{pratap2023scaling,
  title         = {Scaling Speech Technology to 1,000+ Languages},
  author        = {Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli},
  year          = {2023},
  eprint        = {2305.13516},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL}
}
@article{dowson1982frechet,
  title     = {The {Fr{\'e}chet} distance between multivariate normal distributions},
  author    = {Dowson, D. C. and Landau, B. V.},
  journal   = {Journal of multivariate analysis},
  volume    = {12},
  number    = {3},
  pages     = {450--455},
  year      = {1982},
  publisher = {Elsevier}
}
@inproceedings{ismir_YuF23,
  author    = {Chin{-}Yun Yu and Gy{\"{o}}rgy Fazekas},
  editor    = {Augusto Sarti and Fabio Antonacci and Mark Sandler and Paolo Bestagini and Simon Dixon and Beici Liang and Ga{\"{e}}l Richard and Johan Pauwels},
  title     = {Singing Voice Synthesis Using Differentiable {LPC} and Glottal-Flow-Inspired Wavetables},
  booktitle = {Proceedings of the 24th International Society for Music Information Retrieval Conference, {ISMIR} 2023, Milan, Italy, November 5-9, 2023},
  pages     = {667--675},
  year      = {2023},
  url       = {https://doi.org/10.5281/zenodo.10265377},
  doi       = {10.5281/ZENODO.10265377},
  timestamp = {Mon, 18 Dec 2023 11:23:01 +0100},
  biburl    = {https://dblp.org/rec/conf/ismir/YuF23.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{forgione2021dynonet,
  title     = {{dynoNet}: A neural network architecture for learning dynamical systems},
  author    = {Forgione, Marco and Piga, Dario},
  journal   = {International Journal of Adaptive Control and Signal Processing},
  volume    = {35},
  number    = {4},
  pages     = {612--626},
  year      = {2021},
  publisher = {Wiley Online Library}
}
@inproceedings{ycy2024diffapf,
  title     = {Differentiable All-pole Filters for Time-varying Audio Systems},
  author    = {Chin-Yun Yu and Christopher Mitcheltree and Alistair Carson and Stefan Bilbao and Joshua D. Reiss and György Fazekas},
  booktitle = {International Conference on Digital Audio Effects (DAFx)},
  year      = {2024},
  pages     = {345--352},
}
% end of bibliography (removed a stray "|" extraction artifact)