[ { "Percentage error": 14.2, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "LiGRU + Dropout + BatchNorm + Monophone Reg", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 14.5, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "LSTM + Dropout + BatchNorm + Monophone Reg", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 14.9, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "GRU + Dropout + BatchNorm + Monophone Reg", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 14.9, "code_links": [], "date": "2018-03-26", "date2": 20180326, "model": "Li-GRU + fMLLR features", "paper": { "title": "Light Gated Recurrent Units for Speech Recognition", "url": "https://cknow.io/lib/da594641b0a9a90f" }, "paper_data_uoa": "da594641b0a9a90f" }, { "Percentage error": 15.9, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "RNN + Dropout + BatchNorm + Monophone Reg", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 16, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "LSTM", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 16.3, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "Li-GRU", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 16.5, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "RNN", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { 
"Percentage error": 16.6, "code_links": [ { "title": "mravanelli/pytorch-kaldi", "url": "https://github.com/mravanelli/pytorch-kaldi" }, { "title": "xpz123/pytorch-kaldi", "url": "https://github.com/xpz123/pytorch-kaldi" } ], "date": "2018-11-19", "date2": 20181119, "model": "GRU", "paper": { "title": "The PyTorch-Kaldi Speech Recognition Toolkit", "url": "https://cknow.io/lib/437631894186a91a" }, "paper_data_uoa": "437631894186a91a" }, { "Percentage error": 16.7, "code_links": [], "date": "2018-03-26", "date2": 20180326, "model": "Light Gated Recurrent Units", "paper": { "title": "Light Gated Recurrent Units for Speech Recognition", "url": "https://cknow.io/lib/da594641b0a9a90f" }, "paper_data_uoa": "da594641b0a9a90f" }, { "Percentage error": 17.3, "code_links": [], "date": "2016-03-01", "date2": 20160301, "model": "RNN-CRF on 24(x3) MFSC", "paper": { "title": "Segmental Recurrent Neural Networks for End-to-end Speech Recognition", "url": "https://cknow.io/lib/62eac4f140bda74e" }, "paper_data_uoa": "62eac4f140bda74e" }, { "Percentage error": 17.6, "code_links": [ { "title": "Alexander-H-Liu/End-to-end-ASR-Pytorch", "url": "https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch" }, { "title": "sooftware/Attention-Implementation", "url": "https://github.com/sooftware/Attention-Implementation" }, { "title": "sh951011/Attention-Implementation", "url": "https://github.com/sh951011/Attention-Implementation" }, { "title": "sh951011/Attention-Implementation", "url": "https://github.com/sh951011/Attention-Implementation/blob/master/README.md" }, { "title": "CKRC24/Listen-and-Translate", "url": "https://github.com/CKRC24/Listen-and-Translate" }, { "title": "jin-zhe/deeplearning", "url": "https://github.com/jin-zhe/deeplearning" } ], "date": "2015-06-24", "date2": 20150624, "model": "Bi-RNN + Attention", "paper": { "title": "Attention-Based Models for Speech Recognition", "url": "https://cknow.io/lib/19af797fb4d51e83" }, "paper_data_uoa": "19af797fb4d51e83" }, { "Percentage error": 17.7, "code_links": [ { "title": "1ytic/warp-rnnt", "url": "https://github.com/1ytic/warp-rnnt" }, { "title": "jeremyrchow/text-generation-kaggle", "url": "https://github.com/jeremyrchow/text-generation-kaggle" } ], "date": "2013-03-22", "date2": 20130322, "model": "Bi-LSTM + skip connections w/ CTC", "paper": { "title": "Speech Recognition with Deep Recurrent Neural Networks", "url": "https://cknow.io/lib/8c2ac15865f7ec4d" }, "paper_data_uoa": "8c2ac15865f7ec4d" }, { "Percentage error": 19.64, "code_links": [ { "title": "Orkis-Research/Pytorch-Quaternion-Neural-Networks", "url": "https://github.com/Orkis-Research/Pytorch-Quaternion-Neural-Networks" }, { "title": "Riccardo-Vecchi/Pytorch-Quaternion-Neural-Networks", "url": "https://github.com/Riccardo-Vecchi/Pytorch-Quaternion-Neural-Networks" } ], "date": "2018-06-20", "date2": 20180620, "model": "QCNN-10L-256FM", "paper": { "title": "Quaternion Convolutional Neural Networks for End-to-End Automatic Speech Recognition", "url": "https://cknow.io/lib/8d5b2c497074ea9b" }, "paper_data_uoa": "8d5b2c497074ea9b" }, { "Percentage error": 20.1, "code_links": [ { "title": "craffel/mad", "url": "https://github.com/craffel/mad" } ], "date": "2017-04-03", "date2": 20170403, "model": "Soft Monotonic Attention (ours, offline)", "paper": { "title": "Online and Linear-Time Attention by Enforcing Monotonic Alignments", "url": "https://cknow.io/lib/4888a613943cd0a9" }, "paper_data_uoa": "4888a613943cd0a9" }, { "Percentage error": 20.4, "code_links": [ { "title": "sciforce/phones-las", 
"url": "https://github.com/sciforce/phones-las" } ], "date": "2019-07-02", "date2": 20190702, "model": "LAS multitask with indicators sampling", "paper": { "title": "Attention model for articulatory features detection", "url": "https://cknow.io/lib/4d7099820e38e92e" }, "paper_data_uoa": "4d7099820e38e92e" } ]