[ { "50.00 percentile latency (ns)": 722891972326, "90.00 percentile latency (ns)": 954515573545, "95.00 percentile latency (ns)": 971273055826, "97.00 percentile latency (ns)": 978224776449, "99.00 percentile latency (ns)": 983682689176, "99.90 percentile latency (ns)": 985434385040, "Max latency (ns)": 986271782867, "Mean latency (ns)": 649606345028, "Min duration satisfied": "Yes", "Min latency (ns)": 49686331359, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "PySUT", "Samples per second": 2007.56, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 91.74727727795441, "characteristics.samples_per_second": 2007.56, "characteristics.samples_per_second.normalized_per_core": 17.924642857142857, "characteristics.samples_per_second.normalized_per_processor": 501.89, "characteristics.word error rate": 8.252722722045593, "ck_system": "1-node-4S-CPX-PyTorch-MIX", "ck_used": false, "cooling": "Air", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.samples_per_second", "dim_x_maximize": true, "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "1536GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380H CPU @ 2.90GHz", "host_processors_per_node": 4, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "Intel Development Platform", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "normalize_cores": 112, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/results/1-node-4S-CPX-PyTorch-MIX", "number_of_nodes": 1, "operating_system": "Ubuntu 20.04.1 LTS", "other_software_stack": "5.4.0-65-generic", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "We don’t retrain the model weights.", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1980000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/3662521", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-4S-CPX-PyTorch-MIX", "system_name": "1-node-4S-CPX-PyTorch-MIX", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3000, "task": "speech recognition", "task2": "speech recognition", "total_cores": 112, "uid": "6474089b04bacfe2", "use_accelerator": false, "weight_data_types": "int8 and bfloat16.", "weight_transformations": "We transform the float32 weights into int8 and bfloat16 with IPEX auto-mixed precision." },
  { "50.00 percentile latency (ns)": 595155384359, "90.00 percentile latency (ns)": 786294813636, "95.00 percentile latency (ns)": 801554013919, "97.00 percentile latency (ns)": 806626800917, "99.00 percentile latency (ns)": 810801190519, "99.90 percentile latency (ns)": 811986234978, "Max latency (ns)": 812151660133, "Mean latency (ns)": 538509377298, "Min duration satisfied": "Yes", "Min latency (ns)": 49812777359, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "PySUT", "Samples per second": 4063.28, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 91.74727727795441, "characteristics.samples_per_second": 4063.28, "characteristics.samples_per_second.normalized_per_core": 18.139642857142857, "characteristics.samples_per_second.normalized_per_processor": 507.91, "characteristics.word error rate": 8.252722722045593, "ck_system": "1-node-8S-CPX-PyTorch-MIX", "ck_used": false, "cooling": "Air", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.samples_per_second", "dim_x_maximize": true, "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "3072GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380H CPU @ 2.90GHz", "host_processors_per_node": 8, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "Intel Development Platform", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "normalize_cores": 224, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/results/1-node-8S-CPX-PyTorch-MIX", "number_of_nodes": 1, "operating_system": "Ubuntu 20.04.1 LTS", "other_software_stack": "5.4.0-66-generic", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "We don’t retrain the model weights.", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 3300000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/3662521", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-8S-CPX-PyTorch-MIX", "system_name": "1-node-8S-CPX-PyTorch-MIX", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5000, "task": "speech recognition", "task2": "speech recognition", "total_cores": 224, "uid": "bb3da90a040822dc", "use_accelerator": false, "weight_data_types": "int8 and bfloat16.", "weight_transformations": "We transform the float32 weights into int8 and bfloat16 with IPEX auto-mixed precision." },
  { "50.00 percentile latency (ns)": 881192612370, "90.00 percentile latency (ns)": 902013672361, "95.00 percentile latency (ns)": 904521350299, "97.00 percentile latency (ns)": 905511891923, "99.00 percentile latency (ns)": 906495451631, "99.90 percentile latency (ns)": 906935174107, "Max latency (ns)": 906986964943, "Mean latency (ns)": 792244746347, "Min duration satisfied": "Yes", "Min latency (ns)": 81274637132, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "PySUT", "Samples per second": 2183.05, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 92.53872691605223, "characteristics.samples_per_second": 2183.05, "characteristics.samples_per_second.normalized_per_core": 2.274010416666667, "characteristics.samples_per_second.normalized_per_processor": 272.88125, "characteristics.word error rate": 7.461273083947778, "ck_system": "1-node-8S-CPX-PyTorch-BF16", "ck_used": true, "cooling": "", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.samples_per_second", "dim_x_maximize": true, "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "3072GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 8, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "normalize_cores": 960, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/results/1-node-8S-CPX-PyTorch-BF16", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "5.4.0-66-generic; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "We don’t retrain the model weights.", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1980000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/2535873/files/resnet50_v1.pb", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-8S-CPX-PyTorch-BF16", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3000, "task": "speech recognition", "task2": "speech recognition", "total_cores": 960, "uid": "4b0da4084a5d8140", "use_accelerator": false, "weight_data_types": "Google bfloat16.", "weight_transformations": "We transform the float32 weights into bfloat16 using TensorFlow's Cast operation." } ]