[ { "mIoU": 59.36, "code_links": [ { "title": "PeikeLi/Self-Correction-Human-Parsing", "url": "https://github.com/PeikeLi/Self-Correction-Human-Parsing" } ], "date": "2019-10-22", "date2": 20191022, "model": "SCHP (ResNet-101)", "paper": { "title": "Self-Correction for Human Parsing", "url": "https://cknow.io/lib/67ae69ceeb5d5c1e" }, "paper_data_uoa": "67ae69ceeb5d5c1e" }, { "mIoU": 56.65, "code_links": [ { "title": "HRNet/HRNet-Semantic-Segmentation", "url": "https://github.com/HRNet/HRNet-Semantic-Segmentation" }, { "title": "openseg-group/OCNet.pytorch", "url": "https://github.com/openseg-group/OCNet.pytorch" }, { "title": "PkuRainBow/OCNet", "url": "https://github.com/PkuRainBow/OCNet" }, { "title": "PkuRainBow/OCNet.pytorch", "url": "https://github.com/PkuRainBow/OCNet.pytorch" }, { "title": "openseg-group/openseg.pytorch", "url": "https://github.com/openseg-group/openseg.pytorch" }, { "title": "rosinality/ocr-pytorch", "url": "https://github.com/rosinality/ocr-pytorch" } ], "date": "2019-09-24", "date2": 20190924, "model": "OCR (HRNetV2-W48)", "paper": { "title": "Object-Contextual Representations for Semantic Segmentation", "url": "https://cknow.io/lib/2425114c988834de" }, "paper_data_uoa": "2425114c988834de" }, { "mIoU": 55.9, "code_links": [ { "title": "CSAILVision/semantic-segmentation-pytorch", "url": "https://github.com/CSAILVision/semantic-segmentation-pytorch" }, { "title": "leoxiaobin/deep-high-resolution-net.pytorch", "url": "https://github.com/leoxiaobin/deep-high-resolution-net.pytorch" }, { "title": "HRNet/HRNet-Semantic-Segmentation", "url": "https://github.com/HRNet/HRNet-Semantic-Segmentation" }, { "title": "HRNet/HRNet-Object-Detection", "url": "https://github.com/HRNet/HRNet-Object-Detection" }, { "title": "HRNet/HRNet-Image-Classification", "url": "https://github.com/HRNet/HRNet-Image-Classification" }, { "title": "HRNet/HRNet-Facial-Landmark-Detection", "url": "https://github.com/HRNet/HRNet-Facial-Landmark-Detection" }, { "title": "HRNet/HRNet-MaskRCNN-Benchmark", "url": "https://github.com/HRNet/HRNet-MaskRCNN-Benchmark" }, { "title": "HRNet/HRNet-FCOS", "url": "https://github.com/HRNet/HRNet-FCOS" }, { "title": "yuanyuanli85/tf-hrnet", "url": "https://github.com/yuanyuanli85/tf-hrnet" }, { "title": "strivebo/image_segmentation_dl", "url": "https://github.com/strivebo/image_segmentation_dl" }, { "title": "shijianjian/HRNet_Keras", "url": "https://github.com/shijianjian/HRNet_Keras" }, { "title": "kukby/Mish-semantic-segmentation-pytorch", "url": "https://github.com/kukby/Mish-semantic-segmentation-pytorch" }, { "title": "laowang666888/HRNET", "url": "https://github.com/laowang666888/HRNET" } ], "date": "2019-04-09", "date2": 20190409, "model": "HRNetV2 (HRNetV2-W48)", "paper": { "title": "High-Resolution Representations for Labeling Pixels and Regions", "url": "https://cknow.io/lib/d37746e388d1d829" }, "paper_data_uoa": "d37746e388d1d829" }, { "mIoU": 55.6, "code_links": [ { "title": "HRNet/HRNet-Semantic-Segmentation", "url": "https://github.com/HRNet/HRNet-Semantic-Segmentation" }, { "title": "openseg-group/OCNet.pytorch", "url": "https://github.com/openseg-group/OCNet.pytorch" }, { "title": "PkuRainBow/OCNet", "url": "https://github.com/PkuRainBow/OCNet" }, { "title": "PkuRainBow/OCNet.pytorch", "url": "https://github.com/PkuRainBow/OCNet.pytorch" }, { "title": "openseg-group/openseg.pytorch", "url": "https://github.com/openseg-group/openseg.pytorch" }, { "title": "rosinality/ocr-pytorch", "url": "https://github.com/rosinality/ocr-pytorch" } ], "date": 
"2019-09-24", "date2": 20190924, "model": "OCR (ResNet-101)", "paper": { "title": "Object-Contextual Representations for Semantic Segmentation", "url": "https://cknow.io/lib/2425114c988834de" }, "paper_data_uoa": "2425114c988834de" }, { "mIoU": 53.1, "code_links": [ { "title": "liutinglt/CE2P", "url": "https://github.com/liutinglt/CE2P" }, { "title": "Liuyixuan95/DORN", "url": "https://github.com/Liuyixuan95/DORN" } ], "date": "2018-09-17", "date2": 20180917, "model": "CE2P (ResNet-101)", "paper": { "title": "Devil in the Details: Towards Accurate Single and Multiple Human Parsing", "url": "https://cknow.io/lib/83443f200a4416f8" }, "paper_data_uoa": "83443f200a4416f8" }, { "mIoU": 51.37, "code_links": [ { "title": "andrewjong/SwapNet", "url": "https://github.com/andrewjong/SwapNet" } ], "date": "2018-04-05", "date2": 20180405, "model": "JPPNet (ResNet-101)", "paper": { "title": "Look into Person: Joint Body Parsing & Pose Estimation Network and A New Benchmark", "url": "https://cknow.io/lib/f8547038f0be559f" }, "paper_data_uoa": "f8547038f0be559f" }, { "mIoU": 46.81, "code_links": [ { "title": "RoyalVane/MMAN", "url": "https://github.com/RoyalVane/MMAN" } ], "date": "2018-07-22", "date2": 20180722, "model": "MMAN (ResNet-101)", "paper": { "title": "Macro-Micro Adversarial Network for Human Parsing", "url": "https://cknow.io/lib/c4b1f79ac0930ea7" }, "paper_data_uoa": "c4b1f79ac0930ea7" }, { "mIoU": 44.73, "code_links": [], "date": "2017-03-16", "date2": 20170316, "model": "Attention+SSL (ResNet-101)", "paper": { "title": "Look into Person: Self-supervised Structure-sensitive Learning and A New Benchmark for Human Parsing", "url": "https://cknow.io/lib/d9fefa64282b5339" }, "paper_data_uoa": "d9fefa64282b5339" } ]