[ { "50.00 percentile latency (ns)": 513650946, "90.00 percentile latency (ns)": 717124769, "95.00 percentile latency (ns)": 796131391, "97.00 percentile latency (ns)": 852749374, "99.00 percentile latency (ns)": 948889007, "99.90 percentile latency (ns)": 1200988862, "Completed samples per second": 439.58, "Max latency (ns)": 1727589986, "Mean latency (ns)": 535652248, "Min duration satisfied": "Yes", "Min latency (ns)": 214131410, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "PySUT", "Scenario": "server", "Scheduled samples per second": 439.91, "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 91.74727727795441, "characteristics.scheduled_queries_per_second": 439.91, "characteristics.scheduled_queries_per_second.normalized_per_core": 3.9277678571428574, "characteristics.scheduled_queries_per_second.normalized_per_processor": 109.9775, "characteristics.word error rate": 8.252722722045593, "ck_system": "1-node-4S-CPX-PyTorch-MIX", "ck_used": false, "cooling": "Air", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.scheduled_queries_per_second", "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "1536GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380H CPU @ 2.90GHz", "host_processors_per_node": 4, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "Intel Devlopment Platform", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 0, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 270336, "mlperf_version": 1.1, "normalize_cores": 112, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/results/1-node-4S-CPX-PyTorch-MIX", "number_of_nodes": 1, "operating_system": "Ubuntu 20.04.1 LTS", "other_software_stack": "5.4.0-65-generic", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "We don’t retrain the model weight.", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/3662521", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "", "system_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-4S-CPX-PyTorch-MIX", "system_name": "1-node-4S-CPX-PyTorch-MIX", "system_type": "datacenter", "target_latency (ns)": 1000000000, "target_qps": 440, "task": "speech recognition", "task2": "speech recognition", "total_cores": 112, "uid": "a63f3af3d4521bfa", "use_accelerator": false, "weight_data_types": "int8 and bfloat16.", "weight_transformations": "We transform float32 weight into int8 & bfloat16 with IPEX auto-mix-precision." }, { "50.00 percentile latency (ns)": 644787697, "90.00 percentile latency (ns)": 834222206, "95.00 percentile latency (ns)": 861915967, "97.00 percentile latency (ns)": 878071519, "99.00 percentile latency (ns)": 912422350, "99.90 percentile latency (ns)": 1363130253, "Completed samples per second": 998.84, "Max latency (ns)": 1746975926, "Mean latency (ns)": 672146237, "Min duration satisfied": "Yes", "Min latency (ns)": 149717243, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "PySUT", "Scenario": "server", "Scheduled samples per second": 999.47, "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 91.74727727795441, "characteristics.scheduled_queries_per_second": 999.47, "characteristics.scheduled_queries_per_second.normalized_per_core": 4.461919642857143, "characteristics.scheduled_queries_per_second.normalized_per_processor": 124.93375, "characteristics.word error rate": 8.252722722045593, "ck_system": "1-node-8S-CPX-PyTorch-MIX", "ck_used": false, "cooling": "Air", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.scheduled_queries_per_second", "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "3072GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380H CPU @ 2.90GHz", "host_processors_per_node": 8, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "Intel Devlopment Platform", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 0, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 270336, "mlperf_version": 1.1, "normalize_cores": 224, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/results/1-node-8S-CPX-PyTorch-MIX", "number_of_nodes": 1, "operating_system": "Ubuntu 20.04.1 LTS", "other_software_stack": "5.4.0-66-generic", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, 
"print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "We don’t retrain the model weight.", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/3662521", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-8S-CPX-PyTorch-MIX", "system_name": "1-node-8S-CPX-PyTorch-MIX", "system_type": "datacenter", "target_latency (ns)": 1000000000, "target_qps": 1000, "task": "speech recognition", "task2": "speech recognition", "total_cores": 224, "uid": "8ec571f797c46279", "use_accelerator": false, "weight_data_types": "int8 and bfloat16.", "weight_transformations": "We transform float32 weight into int8 & bfloat16 with IPEX auto-mix-precision." }, { "50.00 percentile latency (ns)": 546865977, "90.00 percentile latency (ns)": 783651417, "95.00 percentile latency (ns)": 825885701, "97.00 percentile latency (ns)": 855554978, "99.00 percentile latency (ns)": 929639139, "99.90 percentile latency (ns)": 1610317245, "Completed samples per second": 330.66, "Max latency (ns)": 2009482361, "Mean latency (ns)": 582201285, "Min duration satisfied": "Yes", "Min latency (ns)": 108243401, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "PySUT", "Scenario": "server", "Scheduled samples per second": 330.76, "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.accuracy": 92.54549144287357, "characteristics.scheduled_queries_per_second": 330.76, "characteristics.scheduled_queries_per_second.normalized_per_core": 0.34454166666666663, "characteristics.scheduled_queries_per_second.normalized_per_processor": 41.345, "characteristics.word error rate": 7.45450855712643, "ck_system": "1-node-8S-CPX-PyTorch-BF16", "ck_used": true, "cooling": "", "dataset": "LibriSpeech", "dataset_link": "", "dim_x_default": "characteristics.scheduled_queries_per_second", "dim_y_default": "characteristics.accuracy", "dim_y_maximize": true, "division": "closed", "formal_model": "rnn-t", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "3072GB", "host_memory_configuration": "6 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 8, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "", "informal_model": "rnnt", "input_data_types": "No change.", "key.accuracy": "characteristics.accuracy", "max_async_queries": 0, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 270336, "mlperf_version": 1.0, "normalize_cores": 960, 
"normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/results/1-node-8S-CPX-PyTorch-BF16", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "5.4.0-66-generic; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 2513, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "We don’t retrain the model weight.", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/2535873/files/resnet50_v1.pb", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-8S-CPX-PyTorch-BF16", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 1000000000, "target_qps": 330, "task": "speech recognition", "task2": "speech recognition", "total_cores": 960, "uid": "7c54cbbab770ad74", "use_accelerator": false, "weight_data_types": "Google bfloat16.", "weight_transformations": "We transform float32 weight into bfloat16 using TensorFlow's Cast operation." } ]