{ "meta": { "info": "SOTA: Validating MLPerf inference benchmark v0.5 results (open, Research, Development, and Other).", "scenario": "universal", "scenario_uid": "3bf7371412455a8f", "title": "SOTA: MLPerf inference benchmark v0.5 results snapshot (open, Research, Development, and Other) for collaborative validation", "viz_engine": "ck_beta" }, "notes": "Benchmark results (performance): P_*_SS - Single Stream in milliseconds, P_*_MS - MultiStream in no. streams, P_*_S - Server in QPS, P_*_O - Offline in inputs/second.\nBenchmark results (accuracy): A_IC* - Top-1, A_OD* - mAP, A_NMT* - BLEU.\nCK components: packages, software detection plugins.\nImage Classification: IC1 - ImageNet, MobileNet-v1, IC2 - ImageNet, ResNet-50 v1.5.\nObject detection: OD1 - COCO, SSD w/ MobileNet-v1, OD2 - COCO 1200x1200, SSD w/ ResNet-34.\nTranslation: NMT - WMT English-German, GNMT.\nForm Factor: FF_M - Mobile/Handheld, FF_D - Desktop/Workstation, FF_S - Server, FF_E - Edge/Embedded.\nThese are not official results but a snapshot to collaboratively reproduce results and add portable workflows!\nMLPerf name and logo are trademarks. See www.mlperf.org for more information.", "related_components": { "paper": { "title": "MLPerf Inference Benchmark", "uid": "d0e50ebb5b9d4ec9" }, "solutions": [ { "name": "1", "uid": "1863f39cdaef4539" }, { "name": "2", "uid": "bdafb75d4404ad0f" }, { "name": "3", "uid": "c4ebcb732f9dee76" }, { "name": "4", "uid": "671004c70aaea939" }, { "name": "5", "uid": "6435b0db54876aac" }, { "name": "6", "uid": "019c4f9f9acc8ff6" }, { "name": "7", "uid": "4baf94abf6d1f8db" } ] }, "source": "https://mlperf.org/inference-overview/#overview", "tags": [ "result", "benchmarking", "reproducible-benchmarking", "crowd-benchmarking", "reproduced-results", "imported-ck-results", "mlperf", "mlperf-inference", "mlperf-inference-v0.5", "mlperf-inference-v0.5-snapshot", "viz-engine-ck-beta" ] }