[ { "50.00 percentile latency (ns)": 46006560, "90.00 percentile latency (ns)": 79531177, "95.00 percentile latency (ns)": 90557306, "97.00 percentile latency (ns)": 100908026, "99.00 percentile latency (ns)": 123279906, "99.90 percentile latency (ns)": 162379037, "Completed samples per second": 139.97, "Max latency (ns)": 440647357, "Mean latency (ns)": 51303021, "Min duration satisfied": "Yes", "Min latency (ns)": 14156705, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "BERT Server", "Scenario": "server", "Scheduled samples per second": 139.97, "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.scheduled_queries_per_second": 139.97, "characteristics.scheduled_queries_per_second.normalized_per_core": 1.749625, "characteristics.scheduled_queries_per_second.normalized_per_processor": 69.985, "ck_system": "1-node-2S-ICX-PyTorch-INT8", "ck_used": false, "cooling": "Air", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.scheduled_queries_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "PyTorch", "host_memory_capacity": "1024GB", "host_memory_configuration": "8 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 40, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380 CPU @ 2.30GHz", "host_processors_per_node": 2, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "Intel Development Platform (2U Server)", "informal_model": "bert-99", "input_data_types": "No change.", "max_async_queries": 0, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 270336, "mlperf_version": 1.1, "normalize_cores": 80, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/Intel/results/1-node-2S-ICX-PyTorch-INT8", "number_of_nodes": 1, "operating_system": "Ubuntu 20.04.1 LTS", "other_software_stack": "5.4.0-45-generic", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "We don’t retrain the model weight.", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/4792496/files/pytorch_model.bin", "status": "available", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-2S-ICX-PyTorch-INT8", "system_name": "1-node-2S-ICX-PyTorch-INT8", "system_type": "datacenter", "target_latency (ns)": 130000000, "target_qps": 140, "task": "NLP", "task2": "nlp", "total_cores": 80, "uid": "39242c98380d659f", "use_accelerator": false, "weight_data_types": "int8", "weight_transformations": "We load int8 model."
}, { "50.00 percentile latency (ns)": 58010486, "90.00 percentile latency (ns)": 89195741, "95.00 percentile latency (ns)": 103330677, "97.00 percentile latency (ns)": 112943736, "99.00 percentile latency (ns)": 127027394, "99.90 percentile latency (ns)": 140868851, "Completed samples per second": 60.14, "Max latency (ns)": 177213261, "Mean latency (ns)": 62611203, "Min duration satisfied": "Yes", "Min latency (ns)": 22490464, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "PySUT", "Scenario": "server", "Scheduled samples per second": 60.14, "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.scheduled_queries_per_second": 60.14, "characteristics.scheduled_queries_per_second.normalized_per_core": 0.2505833333333333, "characteristics.scheduled_queries_per_second.normalized_per_processor": 30.07, "ck_system": "1-node-2S-ICX-MXNet-INT8", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.scheduled_queries_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "MXNet", "host_memory_capacity": "1024GB", "host_memory_configuration": "8 slots / 64GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", 
"host_processors_per_node": 2, "host_storage_capacity": "", "host_storage_type": "", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "Int8.", "max_async_queries": 0, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 270336, "mlperf_version": 1.0, "normalize_cores": 240, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/Intel/results/1-node-2S-ICX-MXNet-INT8", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "5.4.0-66-generic; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "We don’t retrain the model weight.", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "The original weight filename: https://zenodo.org/record/2592612/files/resnet50_v1.onnx", "status": "preview", "submitter": "Intel", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/Intel", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/1-node-2S-ICX-MXNet-INT8", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 130000000, "target_qps": 60, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "11be41a8924e77c5", "use_accelerator": false, "weight_data_types": "FP32 + Int8", "weight_transformations": "We transfer the weight from fp32 datatype in onnx file to mxnet files then quantize to fp32+int8 datatype file by Intel® Low Precision Optimization Tool." } ]