[ { "Accuracy (RGB+pose)": 90.4, "Accuracy (pose)": 67.9, "code_links": [], "date": "2018-06-01", "date2": 20180601, "model": "Potion", "paper": { "title": "PoTion: Pose MoTion Representation for Action Recognition", "url": "https://cknow.io/lib/8a812422fc8eff06" }, "paper_data_uoa": "8a812422fc8eff06" }, { "Accuracy (RGB+pose)": 86.1, "code_links": [], "date": "2019-06-01", "date2": 20190601, "model": "PA3D+RPAN", "paper": { "title": "PA3D: Pose-Action 3D Machine for Video Recognition", "url": "https://cknow.io/lib/467c9c193c5b6d70" }, "paper_data_uoa": "467c9c193c5b6d70" }, { "Accuracy (RGB+pose)": 85.5, "code_links": [], "date": "2018-06-01", "date2": 20180601, "model": "I3D + Potion", "paper": { "title": "PoTion: Pose MoTion Representation for Action Recognition", "url": "https://cknow.io/lib/8a812422fc8eff06" }, "paper_data_uoa": "8a812422fc8eff06" }, { "Accuracy (RGB+pose)": 84.1, "code_links": [ { "title": "deepmind/kinetics-i3d", "url": "https://github.com/deepmind/kinetics-i3d" }, { "title": "piergiaj/pytorch-i3d", "url": "https://github.com/piergiaj/pytorch-i3d" }, { "title": "yaohungt/GSTEG_CVPR_2019", "url": "https://github.com/yaohungt/GSTEG_CVPR_2019" }, { "title": "dlpbc/keras-kinetics-i3d", "url": "https://github.com/dlpbc/keras-kinetics-i3d" }, { "title": "FrederikSchorr/sign-language", "url": "https://github.com/FrederikSchorr/sign-language" }, { "title": "OanaIgnat/i3d_keras", "url": "https://github.com/OanaIgnat/i3d_keras" }, { "title": "ahsaniqbal/Kinetics-FeatureExtractor", "url": "https://github.com/ahsaniqbal/Kinetics-FeatureExtractor" }, { "title": "novav/CV_Spatiotemporal_Feature_3D_CNN", "url": "https://github.com/novav/CV_Spatiotemporal_Feature_3D_CNN" }, { "title": "sebastiantiesmeyer/deeplabchop3d", "url": "https://github.com/sebastiantiesmeyer/deeplabchop3d" }, { "title": "vijayvee/behavior-recognition", "url": "https://github.com/vijayvee/behavior-recognition" }, { "title": "vijayvee/behavior_recognition", "url": "https://github.com/vijayvee/behavior_recognition" }, { "title": "AbdurrahmanNadi/activity_recognition_web_service", "url": "https://github.com/AbdurrahmanNadi/activity_recognition_web_service" }, { "title": "Alexyuda/action_recognition", "url": "https://github.com/Alexyuda/action_recognition" } ], "date": "2017-05-22", "date2": 20170522, "model": "I3D", "paper": { "title": "Quo Vadis, Action Recognition? 
A New Model and the Kinetics Dataset", "url": "https://cknow.io/lib/e93903d8f34491a9" }, "paper_data_uoa": "e93903d8f34491a9" }, { "Accuracy (RGB+pose)": 83.9, "code_links": [ { "title": "agethen/RPAN", "url": "https://github.com/agethen/RPAN" } ], "date": "2017-10-22", "date2": 20171022, "model": "RPAN", "paper": { "title": "RPAN: An End-to-End Recurrent Pose-Attention Network for Action Recognition in Videos", "url": "https://cknow.io/lib/9548dda7accceea0" }, "paper_data_uoa": "9548dda7accceea0" }, { "Accuracy (RGB+pose)": 76.1, "Accuracy (pose)": 56.8, "code_links": [ { "title": "mzolfaghari/chained-multistream-networks", "url": "https://github.com/mzolfaghari/chained-multistream-networks" } ], "date": "2017-04-03", "date2": 20170403, "model": "Chained (RGB+Flow +Pose)", "paper": { "title": "Chained Multi-stream Networks Exploiting Pose, Motion, and Appearance for Action Classification and Detection", "url": "https://cknow.io/lib/80a449a93824e97b" }, "paper_data_uoa": "80a449a93824e97b" }, { "Accuracy (RGB+pose)": 71.1, "code_links": [], "date": "2016-09-17", "date2": 20160917, "model": "MR Two-Sream R-CNN", "paper": { "title": "Multi-region two-stream R-CNN for action detection", "url": "https://cknow.io/lib/d4469b1efd718403" }, "paper_data_uoa": "d4469b1efd718403" }, { "Accuracy (RGB+pose)": 69.5, "code_links": [], "date": "2019-06-01", "date2": 20190601, "model": "PA3D", "paper": { "title": "PA3D: Pose-Action 3D Machine for Video Recognition", "url": "https://cknow.io/lib/467c9c193c5b6d70" }, "paper_data_uoa": "467c9c193c5b6d70" }, { "Accuracy (RGB+pose)": 64.3, "code_links": [], "date": "2019-02-26", "date2": 20190226, "model": "STAR-Net", "paper": { "title": "STAR-Net: Action Recognition using Spatio-Temporal Activation Reprojection", "url": "https://cknow.io/lib/a33467d9cece1097" }, "paper_data_uoa": "a33467d9cece1097" }, { "Accuracy (RGB+pose)": 62.5, "code_links": [], "date": "2014-11-21", "date2": 20141121, "model": "Action Tubes", "paper": { "title": "Finding Action Tubes", "url": "https://cknow.io/lib/52a12df6112d13ac" }, "paper_data_uoa": "52a12df6112d13ac" }, { "Accuracy (RGB+pose)": "-", "Accuracy (pose)": 77.2, "code_links": [ { "title": "fandulu/DD-Net", "url": "https://github.com/fandulu/DD-Net" } ], "date": "2019-07-23", "date2": 20190723, "model": "DD-Net", "paper": { "title": "Make Skeleton-based Action Recognition Model Smaller, Faster and Better", "url": "https://cknow.io/lib/f3391cd265194e89" }, "paper_data_uoa": "f3391cd265194e89" }, { "Accuracy (RGB+pose)": "-", "Accuracy (pose)": 65.5, "code_links": [ { "title": "noboevbo/ehpi_action_recognition", "url": "https://github.com/noboevbo/ehpi_action_recognition" } ], "date": "2019-04-19", "date2": 20190419, "model": "EHPI", "paper": { "title": "Simple yet efficient real-time pose-based action recognition", "url": "https://cknow.io/lib/b09329a44e548118" }, "paper_data_uoa": "b09329a44e548118" } ]
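A minimal sketch of how these records might be consumed, assuming the array above is saved as `leaderboard.json` (that filename and the `score` helper are illustrative, not part of the dataset): it parses the JSON, treats the `"-"` placeholder as a missing score, and prints the models ranked by RGB+pose accuracy.

```python
import json

# Load the leaderboard records (assumed saved as "leaderboard.json").
with open("leaderboard.json") as f:
    records = json.load(f)

def score(rec, key="Accuracy (RGB+pose)"):
    """Return the accuracy as a number, or None when the field is absent
    or holds the "-" placeholder. Missing scores are encoded as the string
    "-" rather than null, so sorting the raw values would raise a TypeError.
    """
    value = rec.get(key)
    return value if isinstance(value, (int, float)) else None

# Keep only entries that report an RGB+pose score, ranked best-first.
ranked = sorted(
    (r for r in records if score(r) is not None),
    key=score,
    reverse=True,
)

for rec in ranked:
    print(f'{score(rec):5.1f}  {rec["model"]}  ({rec["date"]})')
```

Passing `key="Accuracy (pose)"` to `score` would rank the pose-only results instead, which is how entries like DD-Net and EHPI (RGB+pose reported as `"-"`) become visible.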