[ { "50.00 percentile latency (ns)": 424035133422, "90.00 percentile latency (ns)": 627049879013, "95.00 percentile latency (ns)": 645142014494, "97.00 percentile latency (ns)": 651175251111, "99.00 percentile latency (ns)": 656098606408, "99.90 percentile latency (ns)": 657813520329, "Max latency (ns)": 657979181584, "Mean latency (ns)": 398537996153, "Min duration satisfied": "Yes", "Min latency (ns)": 2016512664, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 8827.03, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 8827.03, "characteristics.samples_per_second.normalized_per_core": 1103.37875, "characteristics.samples_per_second.normalized_per_processor": 1103.37875, "ck_system": "A10x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5808000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8800, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "69cc139b07943d9a", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 429103352928, "90.00 percentile latency (ns)": 634635403811, "95.00 percentile latency (ns)": 652870643450, "97.00 percentile latency (ns)": 658877230362, "99.00 percentile latency (ns)": 663675080570, "99.90 percentile latency (ns)": 665281508902, "Max latency (ns)": 665405795468, "Mean latency (ns)": 402986690532, "Min duration satisfied": "Yes", "Min latency (ns)": 1784553540, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 4221.42, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 4221.42, "characteristics.samples_per_second.normalized_per_core": 527.6775, "characteristics.samples_per_second.normalized_per_processor": 527.6775, "ck_system": "A10x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, 
"performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 2808960, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4256, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "4cb64080bb050e8b", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 457021759870, "90.00 percentile latency (ns)": 676039906653, "95.00 percentile latency (ns)": 695086416103, "97.00 percentile latency (ns)": 701585599710, "99.00 percentile latency (ns)": 709948075841, "99.90 percentile latency (ns)": 717246864798, "Max latency (ns)": 719708529806, "Mean latency (ns)": 430040731321, "Min duration satisfied": "Yes", "Min latency (ns)": 603241137, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 25677.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (7x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 25677.1, "characteristics.samples_per_second.normalized_per_core": 3209.6375, "characteristics.samples_per_second.normalized_per_processor": 3209.6375, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", 
"nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 18480000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB-MIG-7x1g.10gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 28000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "fd42c7835c9d66d5", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 423228653462, "90.00 percentile latency (ns)": 624739183882, "95.00 percentile latency (ns)": 642637638031, "97.00 percentile latency (ns)": 648627682165, "99.00 percentile latency (ns)": 655906450496, "99.90 percentile latency (ns)": 662324227571, "Max latency (ns)": 664582085450, "Mean latency (ns)": 397839127262, "Min duration satisfied": "Yes", "Min latency (ns)": 627717222, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 12513.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (7x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12513.1, "characteristics.samples_per_second.normalized_per_core": 1564.1375, "characteristics.samples_per_second.normalized_per_processor": 1564.1375, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", 
"host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 8316000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB-MIG-7x1g.10gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12600, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "b99fa2f0f76670d5", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 447595195892, "90.00 percentile latency (ns)": 663050199036, "95.00 percentile latency (ns)": 682295036425, "97.00 percentile latency (ns)": 688602864608, "99.00 percentile latency (ns)": 693736865082, "99.90 percentile latency (ns)": 695560468319, "Max latency (ns)": 695695252514, "Mean latency (ns)": 421223091407, "Min duration satisfied": "Yes", "Min latency (ns)": 2990123495, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 11384.3, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11384.3, 
"characteristics.samples_per_second.normalized_per_core": 2846.075, "characteristics.samples_per_second.normalized_per_processor": 2846.075, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 7920000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "423c48b6b3162123", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 435918222916, "90.00 percentile latency (ns)": 644240079493, "95.00 percentile latency (ns)": 662672377728, "97.00 percentile latency (ns)": 668634105152, "99.00 percentile latency (ns)": 673434750666, "99.90 percentile latency (ns)": 675100372255, "Max latency (ns)": 675138067700, "Mean latency (ns)": 409394919975, "Min duration satisfied": "Yes", "Min latency (ns)": 2866848540, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": 
"VALID", "SUT name": "Triton_Server", "Samples per second": 5572.19, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 5572.19, "characteristics.samples_per_second.normalized_per_core": 1393.0475, "characteristics.samples_per_second.normalized_per_processor": 1393.0475, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 3762000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_Triton", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5700, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "6b59d744eb4cf4aa", 
"use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 519976153157, "90.00 percentile latency (ns)": 768882933452, "95.00 percentile latency (ns)": 790585269980, "97.00 percentile latency (ns)": 797616676171, "99.00 percentile latency (ns)": 805943222366, "99.90 percentile latency (ns)": 821188837752, "Max latency (ns)": 823960819764, "Mean latency (ns)": 489100061308, "Min duration satisfied": "Yes", "Min latency (ns)": 713605504, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 12867.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (4x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12867.4, "characteristics.samples_per_second.normalized_per_core": 1608.425, "characteristics.samples_per_second.normalized_per_processor": 1608.425, "ck_system": "A30-MIG_32x1g.6gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_32x1g.6gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 10602240, "schedule_rng_seed": 10051496985653635065, 
"starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_32x1g.6gb_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30-MIG-4x1g.6gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 16064, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "02d85ba89087a6ba", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 520143496185, "90.00 percentile latency (ns)": 766882485775, "95.00 percentile latency (ns)": 788860938413, "97.00 percentile latency (ns)": 796002584022, "99.00 percentile latency (ns)": 803995153263, "99.90 percentile latency (ns)": 819062891300, "Max latency (ns)": 821459324831, "Mean latency (ns)": 488594518923, "Min duration satisfied": "Yes", "Min latency (ns)": 830312103, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 6170.48, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (4x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6170.48, "characteristics.samples_per_second.normalized_per_core": 771.31, "characteristics.samples_per_second.normalized_per_processor": 771.31, "ck_system": "A30-MIG_32x1g.6gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_32x1g.6gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": 
"", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5068800, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_32x1g.6gb_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30-MIG-4x1g.6gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 7680, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "ef1cf6094304682b", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 462778731913, "90.00 percentile latency (ns)": 679915370047, "95.00 percentile latency (ns)": 698719072020, "97.00 percentile latency (ns)": 705032502217, "99.00 percentile latency (ns)": 710129836173, "99.90 percentile latency (ns)": 711938148096, "Max latency (ns)": 712066083563, "Mean latency (ns)": 434938861779, "Min duration satisfied": "Yes", "Min latency (ns)": 4423918707, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 12605.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12605.6, "characteristics.samples_per_second.normalized_per_core": 3151.4, "characteristics.samples_per_second.normalized_per_processor": 3151.4, "ck_system": "A100-PCIe-80GB_aarch64x4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.2, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 80, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Ampere Altra Q80-30", "host_processors_per_node": 1, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, 
"min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GB_aarch64x4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.2, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 8976000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GB_aarch64x4_TRT", "system_name": "Gigabyte G242-P31 (4x A100-PCIe-80GB_aarch64, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 13600, "task": "NLP", "task2": "nlp", "total_cores": 80, "uid": "f44638b0da132ebc", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 562317189898, "90.00 percentile latency (ns)": 826828119567, "95.00 percentile latency (ns)": 849908766950, "97.00 percentile latency (ns)": 857640536177, "99.00 percentile latency (ns)": 863576152687, "99.90 percentile latency (ns)": 865541183832, "Max latency (ns)": 865635796874, "Mean latency (ns)": 528133130153, "Min duration satisfied": "Yes", "Min latency (ns)": 4668731920, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 6252.05, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6252.05, "characteristics.samples_per_second.normalized_per_core": 1563.0125, "characteristics.samples_per_second.normalized_per_processor": 1563.0125, "ck_system": "A100-PCIe-80GB_aarch64x4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.2, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", 
"host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 80, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Ampere Altra Q80-30", "host_processors_per_node": 1, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GB_aarch64x4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.2, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5412000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GB_aarch64x4_TRT", "system_name": "Gigabyte G242-P31 (4x A100-PCIe-80GB_aarch64, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8200, "task": "NLP", "task2": "nlp", "total_cores": 80, "uid": "9a008478ba3e3238", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 416656896065, "90.00 percentile latency (ns)": 612265150079, "95.00 percentile latency (ns)": 629149884369, "97.00 percentile latency (ns)": 634765840423, "99.00 percentile latency (ns)": 639335127765, "99.90 percentile latency (ns)": 640904365459, "Max latency (ns)": 641031095707, "Mean latency (ns)": 391840032903, "Min duration satisfied": "Yes", "Min latency (ns)": 4891332252, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 13384.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13384.7, "characteristics.samples_per_second.normalized_per_core": 1673.0875, "characteristics.samples_per_second.normalized_per_processor": 1673.0875, 
"ck_system": "A30x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 8580000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 13000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "feb274ac14309b82", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 529289177858, "90.00 percentile latency (ns)": 777553690407, "95.00 percentile latency (ns)": 799549626872, "97.00 percentile latency (ns)": 806662039347, "99.00 percentile latency (ns)": 812409645199, "99.90 percentile latency (ns)": 814249602530, "Max latency (ns)": 814361152186, "Mean latency (ns)": 497250967720, "Min duration satisfied": "Yes", "Min latency (ns)": 6869586786, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 6580.86, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", 
"accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6580.86, "characteristics.samples_per_second.normalized_per_core": 822.6075, "characteristics.samples_per_second.normalized_per_processor": 822.6075, "ck_system": "A30x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5359199, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8120, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "1c827740eb14d44b", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 412659014168, "90.00 percentile latency (ns)": 607496207550, "95.00 percentile latency (ns)": 624513493078, "97.00 percentile latency (ns)": 630218834189, "99.00 percentile latency (ns)": 634885141380, "99.90 percentile latency (ns)": 636487385507, "Max latency (ns)": 
636631515171, "Mean latency (ns)": 387991606372, "Min duration satisfied": "Yes", "Min latency (ns)": 3556127847, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 13477.2, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13477.2, "characteristics.samples_per_second.normalized_per_core": 1684.65, "characteristics.samples_per_second.normalized_per_processor": 1684.65, "ck_system": "A30x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 8580000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 13000, "task": 
"NLP", "task2": "nlp", "total_cores": 128, "uid": "ea4f8e46c88e4a0f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 527984278621, "90.00 percentile latency (ns)": 775956280719, "95.00 percentile latency (ns)": 797890641923, "97.00 percentile latency (ns)": 804931754407, "99.00 percentile latency (ns)": 810826370493, "99.90 percentile latency (ns)": 812519847254, "Max latency (ns)": 812767442553, "Mean latency (ns)": 495753616777, "Min duration satisfied": "Yes", "Min latency (ns)": 4094189404, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 6593.77, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6593.77, "characteristics.samples_per_second.normalized_per_core": 824.22125, "characteristics.samples_per_second.normalized_per_processor": 824.22125, "ck_system": "A30x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5359199, "schedule_rng_seed": 10051496985653635065, 
"starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8120, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "69448c7824bb6873", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 456223131274, "90.00 percentile latency (ns)": 669867933631, "95.00 percentile latency (ns)": 688368579346, "97.00 percentile latency (ns)": 694608852748, "99.00 percentile latency (ns)": 699577161186, "99.90 percentile latency (ns)": 701333775747, "Max latency (ns)": 701489158594, "Mean latency (ns)": 428916067506, "Min duration satisfied": "Yes", "Min latency (ns)": 6027671198, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 25591.3, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 25591.3, "characteristics.samples_per_second.normalized_per_core": 3198.9125, "characteristics.samples_per_second.normalized_per_processor": 3198.9125, "ck_system": "A100-PCIe-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 
11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 17952000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27200, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "892c3c47dd8a1c1c", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 429217609845, "90.00 percentile latency (ns)": 630707129454, "95.00 percentile latency (ns)": 648245394228, "97.00 percentile latency (ns)": 654070407314, "99.00 percentile latency (ns)": 658666826845, "99.90 percentile latency (ns)": 660113355590, "Max latency (ns)": 660227409379, "Mean latency (ns)": 402906842101, "Min duration satisfied": "Yes", "Min latency (ns)": 4916993094, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 12795.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12795.6, "characteristics.samples_per_second.normalized_per_core": 1599.45, "characteristics.samples_per_second.normalized_per_processor": 1599.45, "ck_system": "A100-PCIe-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", 
"nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 8448000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12800, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "4fd7a17d36a748bb", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 571639555533, "90.00 percentile latency (ns)": 839908807083, "95.00 percentile latency (ns)": 863539825338, "97.00 percentile latency (ns)": 871340953881, "99.00 percentile latency (ns)": 877623520756, "99.90 percentile latency (ns)": 879785972347, "Max latency (ns)": 879972059471, "Mean latency (ns)": 537313484680, "Min duration satisfied": "Yes", "Min latency (ns)": 6415947433, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 20400.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 2489.721590909091, "characteristics.power.normalized_per_core": 311.2151988636364, "characteristics.power.normalized_per_processor": 311.2151988636364, "characteristics.samples_per_second": 20400.6, "characteristics.samples_per_second.normalized_per_core": 2550.075, "characteristics.samples_per_second.normalized_per_processor": 2550.075, "ck_system": "A100-PCIex8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 
11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 17952000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_MaxQ", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27200, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "9444627899d14ad6", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 474960477196, "90.00 percentile latency (ns)": 699976145937, "95.00 percentile latency (ns)": 720044867158, "97.00 percentile latency (ns)": 726475163724, "99.00 percentile latency (ns)": 731573978858, "99.90 percentile latency (ns)": 733252206962, "Max latency (ns)": 733444259372, "Mean latency (ns)": 446082038246, "Min duration satisfied": "Yes", "Min latency (ns)": 5303273895, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 10049.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 2493.4058583106257, "characteristics.power.normalized_per_core": 311.6757322888282, 
"characteristics.power.normalized_per_processor": 311.6757322888282, "characteristics.samples_per_second": 10049.7, "characteristics.samples_per_second.normalized_per_core": 1256.2125, "characteristics.samples_per_second.normalized_per_processor": 1256.2125, "ck_system": "A100-PCIex8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 7370880, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_MaxQ", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 11168, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "fd50d493ca02a019", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 453584279933, "90.00 percentile latency (ns)": 667593830428, "95.00 percentile latency (ns)": 686297684526, "97.00 percentile latency (ns)": 692518160637, "99.00 percentile latency (ns)": 697636325228, "99.90 percentile latency (ns)": 699448719874, "Max latency (ns)": 699593502966, "Mean latency (ns)": 426909377472, "Min duration satisfied": "Yes", "Min latency (ns)": 6036919674, "Min queries satisfied": "Yes", "Mode": 
"PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 28302.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 28302.1, "characteristics.samples_per_second.normalized_per_core": 3537.7625, "characteristics.samples_per_second.normalized_per_processor": 3537.7625, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 19800000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 30000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "b418c96b1d00e5da", "use_accelerator": true, "weight_data_types": 
"int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 458853105904, "90.00 percentile latency (ns)": 674783227128, "95.00 percentile latency (ns)": 693867033967, "97.00 percentile latency (ns)": 700097404596, "99.00 percentile latency (ns)": 705075994950, "99.90 percentile latency (ns)": 706775299659, "Max latency (ns)": 706949627569, "Mean latency (ns)": 431133020210, "Min duration satisfied": "Yes", "Min latency (ns)": 3262365816, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 14003.8, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 14003.8, "characteristics.samples_per_second.normalized_per_core": 1750.475, "characteristics.samples_per_second.normalized_per_processor": 1750.475, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 9900000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", 
"submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "5e2446637e462d84", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 448917475501, "90.00 percentile latency (ns)": 666115722805, "95.00 percentile latency (ns)": 685458874142, "97.00 percentile latency (ns)": 691811596045, "99.00 percentile latency (ns)": 696911603510, "99.90 percentile latency (ns)": 698676458339, "Max latency (ns)": 698833911971, "Mean latency (ns)": 422807470281, "Min duration satisfied": "Yes", "Min latency (ns)": 3170009516, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 11333.2, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11333.2, "characteristics.samples_per_second.normalized_per_core": 2833.3, "characteristics.samples_per_second.normalized_per_processor": 2833.3, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 
0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 7920000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "92042ef4f60880ed", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 437407232959, "90.00 percentile latency (ns)": 646248675430, "95.00 percentile latency (ns)": 664990403708, "97.00 percentile latency (ns)": 670940499486, "99.00 percentile latency (ns)": 675754605495, "99.90 percentile latency (ns)": 677412149136, "Max latency (ns)": 677521416747, "Mean latency (ns)": 410760917420, "Min duration satisfied": "Yes", "Min latency (ns)": 2968766608, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 5552.59, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 5552.59, "characteristics.samples_per_second.normalized_per_core": 1388.1475, "characteristics.samples_per_second.normalized_per_processor": 1388.1475, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", 
"nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 3762000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5700, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "4ed14dbe9b2cf50d", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 424432991602, "90.00 percentile latency (ns)": 628284404322, "95.00 percentile latency (ns)": 646188992664, "97.00 percentile latency (ns)": 652198275672, "99.00 percentile latency (ns)": 657077147818, "99.90 percentile latency (ns)": 658887342633, "Max latency (ns)": 659023985071, "Mean latency (ns)": 399265157461, "Min duration satisfied": "Yes", "Min latency (ns)": 506129469, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 458.335, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 458.335, "characteristics.samples_per_second.normalized_per_core": 458.335, "characteristics.samples_per_second.normalized_per_processor": 458.335, "ck_system": "A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", 
"host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 302054, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "system_name": "Gigabyte G482-Z54 (1x A30-MIG-1x1g.6gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 457.658, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "88470a2cb4ee181b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 426212456292, "90.00 percentile latency (ns)": 628325133607, "95.00 percentile latency (ns)": 646396306610, "97.00 percentile latency (ns)": 652361180702, "99.00 percentile latency (ns)": 657172763573, "99.90 percentile latency (ns)": 658890315867, "Max latency (ns)": 658982878878, "Mean latency (ns)": 400203711327, "Min duration satisfied": "Yes", "Min latency (ns)": 936820399, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 219.517, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 219.517, "characteristics.samples_per_second.normalized_per_core": 219.517, "characteristics.samples_per_second.normalized_per_processor": 219.517, "ck_system": 
"A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 144658, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.6gb_TRT_HeteroMultiUse", "system_name": "Gigabyte G482-Z54 (1x A30-MIG-1x1g.6gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 219.18, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "44447d48c93a60e0", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 431403985584, "90.00 percentile latency (ns)": 636327487355, "95.00 percentile latency (ns)": 654139318054, "97.00 percentile latency (ns)": 660164325459, "99.00 percentile latency (ns)": 665067140964, "99.90 percentile latency (ns)": 666828351569, "Max latency (ns)": 666966724524, "Mean latency (ns)": 405570629826, "Min duration satisfied": "Yes", "Min latency (ns)": 642707274, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 494.777, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", 
"accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 494.777, "characteristics.samples_per_second.normalized_per_core": 494.777, "characteristics.samples_per_second.normalized_per_processor": 494.777, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 330000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 500, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "094c7db7d98f1ca4", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 
398660015962, "90.00 percentile latency (ns)": 585577443594, "95.00 percentile latency (ns)": 602192199372, "97.00 percentile latency (ns)": 607644754359, "99.00 percentile latency (ns)": 612263789696, "99.90 percentile latency (ns)": 613756690663, "Max latency (ns)": 613867670281, "Mean latency (ns)": 374378679437, "Min duration satisfied": "Yes", "Min latency (ns)": 1202124315, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 241.909, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 241.909, "characteristics.samples_per_second.normalized_per_core": 241.909, "characteristics.samples_per_second.normalized_per_processor": 241.909, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 148500, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 225, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "04b46514188ad600", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 402992112848, "90.00 percentile latency (ns)": 595263146817, "95.00 percentile latency (ns)": 612429252678, "97.00 percentile latency (ns)": 618031514814, "99.00 percentile latency (ns)": 622572807677, "99.90 percentile latency (ns)": 624113112603, "Max latency (ns)": 624228513003, "Mean latency (ns)": 378866357295, "Min duration satisfied": "Yes", "Min latency (ns)": 2944124226, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 10573.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 1243.4033653846138, "characteristics.power.normalized_per_core": 310.85084134615346, "characteristics.power.normalized_per_processor": 310.85084134615346, "characteristics.samples_per_second": 10573.1, "characteristics.samples_per_second.normalized_per_core": 2643.275, "characteristics.samples_per_second.normalized_per_processor": 2643.275, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", 
"number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 6600000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 10000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "a264b4b822fc1b34", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 412297717790, "90.00 percentile latency (ns)": 608940680666, "95.00 percentile latency (ns)": 626359888812, "97.00 percentile latency (ns)": 631907028883, "99.00 percentile latency (ns)": 636525244943, "99.90 percentile latency (ns)": 638069395520, "Max latency (ns)": 638250053505, "Mean latency (ns)": 387293965153, "Min duration satisfied": "Yes", "Min latency (ns)": 3045293877, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 5170.39, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 1243.3104851330208, "characteristics.power.normalized_per_core": 310.8276212832552, "characteristics.power.normalized_per_processor": 310.8276212832552, "characteristics.samples_per_second": 5170.39, "characteristics.samples_per_second.normalized_per_core": 1292.5975, "characteristics.samples_per_second.normalized_per_processor": 1292.5975, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 
7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 3300000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "7887444e9e20a9f5", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 511450833257, "90.00 percentile latency (ns)": 751683517576, "95.00 percentile latency (ns)": 772570056984, "97.00 percentile latency (ns)": 779494653579, "99.00 percentile latency (ns)": 785047393909, "99.90 percentile latency (ns)": 787060255511, "Max latency (ns)": 787233380535, "Mean latency (ns)": 480942385680, "Min duration satisfied": "Yes", "Min latency (ns)": 6327535129, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 22803.9, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 22803.9, "characteristics.samples_per_second.normalized_per_core": 2850.4875, "characteristics.samples_per_second.normalized_per_processor": 2850.4875, "ck_system": "A100-PCIex8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": 
"characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 17952000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27200, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "c1c86614bf17b917", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 422364035765, "90.00 percentile latency (ns)": 621099715992, "95.00 percentile latency (ns)": 638850567504, "97.00 percentile latency (ns)": 644550785730, "99.00 percentile latency (ns)": 649044519558, "99.90 percentile latency (ns)": 650506129348, "Max latency (ns)": 650653142769, "Mean latency (ns)": 396578784981, "Min duration satisfied": "Yes", "Min latency (ns)": 5015248337, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 11328.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, 
"accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11328.4, "characteristics.samples_per_second.normalized_per_core": 1416.05, "characteristics.samples_per_second.normalized_per_processor": 1416.05, "ck_system": "A100-PCIex8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 7370880, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 11168, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "90b54948be2d1e7b", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 501192387884, "90.00 percentile latency (ns)": 740110017417, "95.00 percentile latency (ns)": 761230730764, "97.00 percentile latency (ns)": 768382095123, "99.00 percentile latency (ns)": 774244897855, "99.90 percentile latency (ns)": 776374577715, "Max latency (ns)": 776573674096, "Mean latency (ns)": 471879043099, "Min duration satisfied": "Yes", "Min latency (ns)": 5725183032, "Min queries 
satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 22947, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 22947, "characteristics.samples_per_second.normalized_per_core": 2868.375, "characteristics.samples_per_second.normalized_per_processor": 2868.375, "ck_system": "A100-PCIex8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 17820000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "ee84b4be3a8d825a", "use_accelerator": 
true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 562502507903, "90.00 percentile latency (ns)": 827882058322, "95.00 percentile latency (ns)": 851339115739, "97.00 percentile latency (ns)": 859067647951, "99.00 percentile latency (ns)": 865234651245, "99.90 percentile latency (ns)": 867322231237, "Max latency (ns)": 867501525411, "Mean latency (ns)": 528371382422, "Min duration satisfied": "Yes", "Min latency (ns)": 4174881622, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 11412.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11412.1, "characteristics.samples_per_second.normalized_per_core": 1426.5125, "characteristics.samples_per_second.normalized_per_processor": 1426.5125, "ck_system": "A100-PCIex8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 9900000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", 
"status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "74b4d8158693b2eb", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 439712464978, "90.00 percentile latency (ns)": 648858276826, "95.00 percentile latency (ns)": 667335231965, "97.00 percentile latency (ns)": 673558428072, "99.00 percentile latency (ns)": 678696164914, "99.90 percentile latency (ns)": 680545852915, "Max latency (ns)": 680735995575, "Mean latency (ns)": 414313567931, "Min duration satisfied": "Yes", "Min latency (ns)": 6381624161, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 28116.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 28116.6, "characteristics.samples_per_second.normalized_per_core": 3514.575, "characteristics.samples_per_second.normalized_per_processor": 3514.575, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, 
Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 19140000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 29000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "479bfb6e224a3c8f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 457781357997, "90.00 percentile latency (ns)": 672258855460, "95.00 percentile latency (ns)": 691150005219, "97.00 percentile latency (ns)": 697419660740, "99.00 percentile latency (ns)": 702291351857, "99.90 percentile latency (ns)": 704000971648, "Max latency (ns)": 704153522168, "Mean latency (ns)": 430096376693, "Min duration satisfied": "Yes", "Min latency (ns)": 4212649410, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 14059.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 14059.4, "characteristics.samples_per_second.normalized_per_core": 1757.425, "characteristics.samples_per_second.normalized_per_processor": 1757.425, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, 
"network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 9900000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "f5c112b57e4098a2", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 519294594711, "90.00 percentile latency (ns)": 765741518455, "95.00 percentile latency (ns)": 787422125196, "97.00 percentile latency (ns)": 794633515485, "99.00 percentile latency (ns)": 800467534516, "99.90 percentile latency (ns)": 802511049244, "Max latency (ns)": 802693299357, "Mean latency (ns)": 488604873435, "Min duration satisfied": "Yes", "Min latency (ns)": 6419102837, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 24667, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 3549.2743462017424, "characteristics.power.normalized_per_core": 443.6592932752178, "characteristics.power.normalized_per_processor": 443.6592932752178, "characteristics.samples_per_second": 24667, "characteristics.samples_per_second.normalized_per_core": 3083.375, "characteristics.samples_per_second.normalized_per_processor": 3083.375, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", 
"formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 19800000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 30000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "6a3462183cf840ca", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 527647176998, "90.00 percentile latency (ns)": 778479615759, "95.00 percentile latency (ns)": 800804530306, "97.00 percentile latency (ns)": 808071487816, "99.00 percentile latency (ns)": 813868141956, "99.90 percentile latency (ns)": 815846432782, "Max latency (ns)": 816041625751, "Mean latency (ns)": 495756725557, "Min duration satisfied": "Yes", "Min latency (ns)": 3613116890, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 12131.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", 
"characteristics.power": 3560.57892156862, "characteristics.power.normalized_per_core": 445.0723651960775, "characteristics.power.normalized_per_processor": 445.0723651960775, "characteristics.samples_per_second": 12131.7, "characteristics.samples_per_second.normalized_per_core": 1516.4625, "characteristics.samples_per_second.normalized_per_processor": 1516.4625, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 9900000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "system_name": "NVIDIA DGX A100 (8x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "402681d056707cdd", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 447103286699, "90.00 percentile latency (ns)": 660279298976, "95.00 percentile latency (ns)": 679168394386, "97.00 percentile latency (ns)": 685529547743, "99.00 percentile latency (ns)": 690770734566, "99.90 percentile latency (ns)": 692670305671, "Max latency (ns)": 692861863436, 
"Mean latency (ns)": 421138749809, "Min duration satisfied": "Yes", "Min latency (ns)": 5562130588, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 25719.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 25719.4, "characteristics.samples_per_second.normalized_per_core": 3214.925, "characteristics.samples_per_second.normalized_per_processor": 3214.925, "ck_system": "A100-PCIe-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 17820000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe-80GB, TensorRT, Triton)", "system_type": "datacenter", 
"target_latency (ns)": 0, "target_qps": 27000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "2a89f03012032ae7", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 425307267421, "90.00 percentile latency (ns)": 625041480349, "95.00 percentile latency (ns)": 642746300090, "97.00 percentile latency (ns)": 648557607096, "99.00 percentile latency (ns)": 653168019529, "99.90 percentile latency (ns)": 654735447576, "Max latency (ns)": 654883442903, "Mean latency (ns)": 399474189365, "Min duration satisfied": "Yes", "Min latency (ns)": 3488172781, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 12900, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12900, "characteristics.samples_per_second.normalized_per_core": 1612.5, "characteristics.samples_per_second.normalized_per_processor": 1612.5, "ck_system": "A100-PCIe-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 
517984244576520566, "samples_per_query": 8448000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12800, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "6399b4fa5426eb8f", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 427997514422, "90.00 percentile latency (ns)": 632423674772, "95.00 percentile latency (ns)": 650506211739, "97.00 percentile latency (ns)": 656557879490, "99.00 percentile latency (ns)": 661406702242, "99.90 percentile latency (ns)": 663077454703, "Max latency (ns)": 663217409592, "Mean latency (ns)": 402204574490, "Min duration satisfied": "Yes", "Min latency (ns)": 2517194484, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 8757.31, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 8757.31, "characteristics.samples_per_second.normalized_per_core": 1094.66375, "characteristics.samples_per_second.normalized_per_processor": 1094.66375, "ck_system": "A10x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x8_TRT", "number_of_nodes": 1, 
"number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 5808000, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8800, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "73ecf5d40c292f91", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 425072350170, "90.00 percentile latency (ns)": 628637016719, "95.00 percentile latency (ns)": 646704496559, "97.00 percentile latency (ns)": 652659102789, "99.00 percentile latency (ns)": 657388747622, "99.90 percentile latency (ns)": 658988808454, "Max latency (ns)": 659151917624, "Mean latency (ns)": 399165160594, "Min duration satisfied": "Yes", "Min latency (ns)": 1731447070, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 4261.48, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 4261.48, "characteristics.samples_per_second.normalized_per_core": 532.685, "characteristics.samples_per_second.normalized_per_processor": 532.685, "ck_system": "A10x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, 
"max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 2808960, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4256, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "7eee9d5842939d27", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 330328886363, "90.00 percentile latency (ns)": 594590784382, "95.00 percentile latency (ns)": 627623677990, "97.00 percentile latency (ns)": 640839655676, "99.00 percentile latency (ns)": 654081088150, "99.90 percentile latency (ns)": 660021189702, "Max latency (ns)": 660651036724, "Mean latency (ns)": 330342734605, "Min duration satisfied": "Yes", "Min latency (ns)": 276931196, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 95.9054, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 95.9054, "characteristics.samples_per_second.normalized_per_core": 0.8562982142857143, "characteristics.samples_per_second.normalized_per_processor": 23.97635, "ck_system": "Triton_CPU_4S_8380Hx1", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "OpenVino 2021", "host_memory_capacity": "1536 GB", "host_memory_configuration": "12 slots / 32GB each / 3200 MT/s 
per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Gold 8380H CPU @ 2.70GHz", "host_processors_per_node": 4, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "fp32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 112, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/Triton_CPU_4S_8380Hx1", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "Tensorflow 2.4.0, OpenVino 2021, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 63360, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "The original weight filename: bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "CPU Inference on Triton Inference Server", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Triton_CPU_4S_8380Hx1", "system_name": "Supermicro SYS-240P-TNRT (Cooper Lake running Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 96, "task": "NLP", "task2": "nlp", "total_cores": 112, "uid": "c9c0ff422de15e8a", "use_accelerator": false, "weight_data_types": "int8", "weight_transformations": "We transfer the weight from int8 datatype in ONNX file to int8 datatype in OpenVino IR file." 
}, { "50.00 percentile latency (ns)": 396537528959, "90.00 percentile latency (ns)": 586423674651, "95.00 percentile latency (ns)": 603302847676, "97.00 percentile latency (ns)": 608906512057, "99.00 percentile latency (ns)": 613465581562, "99.90 percentile latency (ns)": 615072800665, "Max latency (ns)": 615228754858, "Mean latency (ns)": 372625157455, "Min duration satisfied": "Yes", "Min latency (ns)": 1882378696, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 8582.17, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 8582.17, "characteristics.samples_per_second.normalized_per_core": 1072.77125, "characteristics.samples_per_second.normalized_per_processor": 1072.77125, "ck_system": "A10x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 5280000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8000, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "6bf28cf16fd46768", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 443867639967, "90.00 percentile latency (ns)": 655855140866, "95.00 percentile latency (ns)": 674737770943, "97.00 percentile latency (ns)": 680919761713, "99.00 percentile latency (ns)": 685850061876, "99.90 percentile latency (ns)": 687522629949, "Max latency (ns)": 687664387763, "Mean latency (ns)": 416793841268, "Min duration satisfied": "Yes", "Min latency (ns)": 1794598071, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 4084.78, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 4084.78, "characteristics.samples_per_second.normalized_per_core": 510.5975, "characteristics.samples_per_second.normalized_per_processor": 510.5975, "ck_system": "A10x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, 
"performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 2808960, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4256, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "c2444f11b4b4df16", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 458043757464, "90.00 percentile latency (ns)": 677518403239, "95.00 percentile latency (ns)": 696528904641, "97.00 percentile latency (ns)": 702964572442, "99.00 percentile latency (ns)": 713389142114, "99.90 percentile latency (ns)": 719584008919, "Max latency (ns)": 721683680664, "Mean latency (ns)": 431081798458, "Min duration satisfied": "Yes", "Min latency (ns)": 696939904, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 25606.8, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (7x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 25606.8, "characteristics.samples_per_second.normalized_per_core": 3200.85, "characteristics.samples_per_second.normalized_per_processor": 3200.85, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", 
"nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 18480000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB-MIG-7x1g.10gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 28000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "4cdcb7637aa81029", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 423563423795, "90.00 percentile latency (ns)": 624887058619, "95.00 percentile latency (ns)": 642665528646, "97.00 percentile latency (ns)": 648455826435, "99.00 percentile latency (ns)": 657646584337, "99.90 percentile latency (ns)": 663246075822, "Max latency (ns)": 664769909508, "Mean latency (ns)": 398102349082, "Min duration satisfied": "Yes", "Min latency (ns)": 508774579, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_MultiMigServer", "Samples per second": 12509.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (7x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 12509.6, "characteristics.samples_per_second.normalized_per_core": 1563.7, "characteristics.samples_per_second.normalized_per_processor": 1563.7, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": 
"", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 8316000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_56x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB-MIG-7x1g.10gb, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12600, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "6dba43c0a10aded5", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 414751262194, "90.00 percentile latency (ns)": 611928728581, "95.00 percentile latency (ns)": 629088349076, "97.00 percentile latency (ns)": 634830679036, "99.00 percentile latency (ns)": 639522200444, "99.90 percentile latency (ns)": 641164974875, "Max latency (ns)": 641317434545, "Mean latency (ns)": 389907256763, "Min duration satisfied": "Yes", "Min latency (ns)": 1476888473, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 3601.96, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 3601.96, "characteristics.samples_per_second.normalized_per_core": 3601.96, 
"characteristics.samples_per_second.normalized_per_processor": 3601.96, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 2310000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3500, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "4fadb9f32206ea6b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 428356010631, "90.00 percentile latency (ns)": 630777356757, "95.00 percentile latency (ns)": 648695726199, "97.00 percentile latency (ns)": 654523613262, "99.00 percentile latency (ns)": 659185583218, "99.90 percentile latency (ns)": 660757618770, "Max latency (ns)": 660884916932, "Mean latency (ns)": 402300039189, "Min duration satisfied": "Yes", "Min latency (ns)": 959278065, "Min queries satisfied": "Yes", 
"Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 1747.66, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 1747.66, "characteristics.samples_per_second.normalized_per_core": 1747.66, "characteristics.samples_per_second.normalized_per_processor": 1747.66, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1155000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_datacenter", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", 
"system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 1750, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "e6fc4906ad7e94f2", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 415824802546, "90.00 percentile latency (ns)": 615935450245, "95.00 percentile latency (ns)": 633586322910, "97.00 percentile latency (ns)": 639534585887, "99.00 percentile latency (ns)": 644462548239, "99.90 percentile latency (ns)": 646224858882, "Max latency (ns)": 646388856610, "Mean latency (ns)": 391507599076, "Min duration satisfied": "Yes", "Min latency (ns)": 1388996157, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 3573.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 3573.7, "characteristics.samples_per_second.normalized_per_core": 3573.7, "characteristics.samples_per_second.normalized_per_processor": 3573.7, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", 
"power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 2310000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3500, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "4b8b817054e241aa", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 430303152377, "90.00 percentile latency (ns)": 632638308009, "95.00 percentile latency (ns)": 650458428022, "97.00 percentile latency (ns)": 656292361804, "99.00 percentile latency (ns)": 660972269585, "99.90 percentile latency (ns)": 662554075373, "Max latency (ns)": 662612067939, "Mean latency (ns)": 403890956755, "Min duration satisfied": "Yes", "Min latency (ns)": 1949793480, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 1743.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 1743.1, "characteristics.samples_per_second.normalized_per_core": 1743.1, "characteristics.samples_per_second.normalized_per_processor": 1743.1, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, 
"note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1155000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_Triton_datacenter", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 1750, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "27d9061a4e4f1630", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 511196520051, "90.00 percentile latency (ns)": 750134266031, "95.00 percentile latency (ns)": 770845853519, "97.00 percentile latency (ns)": 777588150534, "99.00 percentile latency (ns)": 783199448790, "99.90 percentile latency (ns)": 785133302019, "Max latency (ns)": 785248462199, "Mean latency (ns)": 480408391366, "Min duration satisfied": "Yes", "Min latency (ns)": 5574477056, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 13259.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13259.7, "characteristics.samples_per_second.normalized_per_core": 1657.4625, "characteristics.samples_per_second.normalized_per_processor": 1657.4625, "ck_system": "A30x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", 
"host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 10412159, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15776, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "51ec63c348d9876f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 434947579332, "90.00 percentile latency (ns)": 638310674430, "95.00 percentile latency (ns)": 656199990674, "97.00 percentile latency (ns)": 661996353242, "99.00 percentile latency (ns)": 666617753901, "99.90 percentile latency (ns)": 668007544173, "Max latency (ns)": 668154391359, "Mean latency (ns)": 408289472003, "Min duration satisfied": "Yes", "Min latency (ns)": 6770787400, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 6398.39, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6398.39, "characteristics.samples_per_second.normalized_per_core": 799.79875, "characteristics.samples_per_second.normalized_per_processor": 799.79875, "ck_system": "A30x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": 
"seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 4275110, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 6477.44, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "240509a39b9a166c", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 505550190419, "90.00 percentile latency (ns)": 744147492360, "95.00 percentile latency (ns)": 764957616572, "97.00 percentile latency (ns)": 771823628040, "99.00 percentile latency (ns)": 777565632682, "99.90 percentile latency (ns)": 779519968979, "Max latency (ns)": 779699561108, "Mean latency (ns)": 475277001708, "Min duration satisfied": "Yes", "Min latency (ns)": 4021953481, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 13354.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", 
"accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13354.1, "characteristics.samples_per_second.normalized_per_core": 1669.2625, "characteristics.samples_per_second.normalized_per_processor": 1669.2625, "ck_system": "A30x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 10412159, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15776, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "2ad2c8b05842f987", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 543392109260, "90.00 percentile latency (ns)": 799074755399, "95.00 percentile latency (ns)": 821303324061, "97.00 percentile latency (ns)": 828951689684, "99.00 percentile latency (ns)": 834730196650, "99.90 percentile latency (ns)": 836637930591, "Max latency (ns)": 836751644432, "Mean latency (ns)": 510382895260, "Min duration satisfied": "Yes", "Min 
latency (ns)": 4175771061, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 6404.77, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 6404.77, "characteristics.samples_per_second.normalized_per_core": 800.59625, "characteristics.samples_per_second.normalized_per_processor": 800.59625, "ck_system": "A30x8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 5359199, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x8_TRT_Triton", "system_name": "Gigabyte G482-Z54 (8x A30, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8120, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "806f37ee5f240228", 
"use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 435640465880, "90.00 percentile latency (ns)": 640845334601, "95.00 percentile latency (ns)": 658837155519, "97.00 percentile latency (ns)": 664773226705, "99.00 percentile latency (ns)": 669546394104, "99.90 percentile latency (ns)": 671180473280, "Max latency (ns)": 671303189156, "Mean latency (ns)": 409102049959, "Min duration satisfied": "Yes", "Min latency (ns)": 5102550220, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 17696.9, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 2289.9773809523826, "characteristics.power.normalized_per_core": 286.24717261904783, "characteristics.power.normalized_per_processor": 286.24717261904783, "characteristics.samples_per_second": 17696.9, "characteristics.samples_per_second.normalized_per_core": 2212.1125, "characteristics.samples_per_second.normalized_per_processor": 2212.1125, "ck_system": "A100-PCIex8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", 
"sample_index_rng_seed": 1570999273408051088, "samples_per_query": 11880000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_MaxQ", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 18000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "5528012ad134e34a", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 395782036851, "90.00 percentile latency (ns)": 582931182291, "95.00 percentile latency (ns)": 599562051510, "97.00 percentile latency (ns)": 604966680637, "99.00 percentile latency (ns)": 609178371354, "99.90 percentile latency (ns)": 610549321479, "Max latency (ns)": 610720900099, "Mean latency (ns)": 371504783216, "Min duration satisfied": "Yes", "Min latency (ns)": 5364023732, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 8645.52, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 2300.0793442622944, "characteristics.power.normalized_per_core": 287.5099180327868, "characteristics.power.normalized_per_processor": 287.5099180327868, "characteristics.samples_per_second": 8645.52, "characteristics.samples_per_second.normalized_per_core": 1080.69, "characteristics.samples_per_second.normalized_per_processor": 1080.69, "ck_system": "A100-PCIex8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": 
"https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 5280000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT_MaxQ", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "a8242668916a0dd6", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 453026095513, "90.00 percentile latency (ns)": 666548141587, "95.00 percentile latency (ns)": 685199885102, "97.00 percentile latency (ns)": 691422257219, "99.00 percentile latency (ns)": 696519400430, "99.90 percentile latency (ns)": 698301993120, "Max latency (ns)": 698477695479, "Mean latency (ns)": 426383871172, "Min duration satisfied": "Yes", "Min latency (ns)": 6208337371, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 28347.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 28347.4, "characteristics.samples_per_second.normalized_per_core": 3543.425, "characteristics.samples_per_second.normalized_per_processor": 3543.425, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", 
"host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 19800000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 30000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "b39b541707866101", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 463936342275, "90.00 percentile latency (ns)": 682211151549, "95.00 percentile latency (ns)": 701491648858, "97.00 percentile latency (ns)": 707806398009, "99.00 percentile latency (ns)": 712847732521, "99.90 percentile latency (ns)": 714538658393, "Max latency (ns)": 714720617622, "Mean latency (ns)": 435841818426, "Min duration satisfied": "Yes", "Min latency (ns)": 3339343338, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 13851.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13851.6, "characteristics.samples_per_second.normalized_per_core": 1731.45, "characteristics.samples_per_second.normalized_per_processor": 1731.45, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", 
"dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 9900000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "d6a98f941b1a4457", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 452173212101, "90.00 percentile latency (ns)": 668149683023, "95.00 percentile latency (ns)": 687309442187, "97.00 percentile latency (ns)": 693603490465, "99.00 percentile latency (ns)": 698703569133, "99.90 percentile latency (ns)": 700484794772, "Max latency (ns)": 700604971633, "Mean latency (ns)": 425140000911, "Min duration satisfied": "Yes", "Min latency (ns)": 3384794302, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 11304.5, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, 
"accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11304.5, "characteristics.samples_per_second.normalized_per_core": 2826.125, "characteristics.samples_per_second.normalized_per_processor": 2826.125, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 7920000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 12000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "812f49014ba245cc", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 534787488873, "90.00 percentile latency (ns)": 794745914506, "95.00 percentile latency (ns)": 817682920585, "97.00 percentile latency (ns)": 825267024740, "99.00 percentile latency (ns)": 831264389996, "99.90 percentile latency (ns)": 833239282734, "Max latency (ns)": 833424088849, "Mean latency (ns)": 503216444179, "Min 
duration satisfied": "Yes", "Min latency (ns)": 3224123144, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 5543.4, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 5543.4, "characteristics.samples_per_second.normalized_per_core": 1385.85, "characteristics.samples_per_second.normalized_per_processor": 1385.85, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 4620000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 
7000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "09ba32ec85275022", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 433206856089, "90.00 percentile latency (ns)": 639501684222, "95.00 percentile latency (ns)": 657438837629, "97.00 percentile latency (ns)": 663461189107, "99.00 percentile latency (ns)": 668448389279, "99.90 percentile latency (ns)": 670221411880, "Max latency (ns)": 670365777810, "Mean latency (ns)": 407203095941, "Min duration satisfied": "Yes", "Min latency (ns)": 650388108, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 492.269, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 492.269, "characteristics.samples_per_second.normalized_per_core": 492.269, "characteristics.samples_per_second.normalized_per_processor": 492.269, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", 
"sample_index_rng_seed": 1570999273408051088, "samples_per_query": 330000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "system_name": "NVIDIA DGX-A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 500, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "54d61bae51fb052b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 396507683602, "90.00 percentile latency (ns)": 583040261520, "95.00 percentile latency (ns)": 599722068638, "97.00 percentile latency (ns)": 605089054213, "99.00 percentile latency (ns)": 609681392342, "99.90 percentile latency (ns)": 611161478626, "Max latency (ns)": 611272135567, "Mean latency (ns)": 372334471703, "Min duration satisfied": "Yes", "Min latency (ns)": 1197000340, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 242.936, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 242.936, "characteristics.samples_per_second.normalized_per_core": 242.936, "characteristics.samples_per_second.normalized_per_processor": 242.936, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": 
"https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 148500, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_HeteroMultiUse", "system_name": "NVIDIA DGX-A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, HeteroMultiUse)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 225, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "d621d7959c491587", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 430597734878, "90.00 percentile latency (ns)": 637815354843, "95.00 percentile latency (ns)": 656275063847, "97.00 percentile latency (ns)": 662303804503, "99.00 percentile latency (ns)": 667231306607, "99.90 percentile latency (ns)": 668882502955, "Max latency (ns)": 669007069485, "Mean latency (ns)": 405409677505, "Min duration satisfied": "Yes", "Min latency (ns)": 3028036804, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 9865.37, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 1272.0730941704037, "characteristics.power.normalized_per_core": 318.0182735426009, "characteristics.power.normalized_per_processor": 318.0182735426009, "characteristics.samples_per_second": 9865.37, "characteristics.samples_per_second.normalized_per_core": 2466.3425, "characteristics.samples_per_second.normalized_per_processor": 2466.3425, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", 
"host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 6600000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 10000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "4f8414172a72c47b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 445217725844, "90.00 percentile latency (ns)": 657585712288, "95.00 percentile latency (ns)": 676482637625, "97.00 percentile latency (ns)": 682626884666, "99.00 percentile latency (ns)": 687498510991, "99.90 percentile latency (ns)": 689156265762, "Max latency (ns)": 689272570160, "Mean latency (ns)": 418245619220, "Min duration satisfied": "Yes", "Min latency (ns)": 3236771715, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 4787.66, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 1268.9776487663275, "characteristics.power.normalized_per_core": 317.2444121915819, "characteristics.power.normalized_per_processor": 317.2444121915819, 
"characteristics.samples_per_second": 4787.66, "characteristics.samples_per_second.normalized_per_core": 1196.915, "characteristics.samples_per_second.normalized_per_processor": 1196.915, "ck_system": "DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "512 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 1, "host_storage_capacity": "10 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 3300000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-Station-A100_A100-SXM-80GBx4_TRT_MaxQ", "system_name": "NVIDIA DGX Station A100 (4x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5000, "task": "NLP", "task2": "nlp", "total_cores": 64, "uid": "7b5cd4104f7ebb8c", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 510426745956, "90.00 percentile latency (ns)": 750266278275, "95.00 percentile latency (ns)": 771051448016, "97.00 percentile latency (ns)": 777961572997, "99.00 percentile latency (ns)": 783579292348, "99.90 percentile latency (ns)": 785573864490, "Max latency (ns)": 785745914743, "Mean latency (ns)": 479940776169, "Min duration satisfied": "Yes", "Min latency (ns)": 6158661963, "Min queries satisfied": "Yes", "Mode": 
"PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 22847.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 22847.1, "characteristics.samples_per_second.normalized_per_core": 2855.8875, "characteristics.samples_per_second.normalized_per_processor": 2855.8875, "ck_system": "A100-PCIex8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 17952000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27200, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "a7fbd00339ea47e0", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": 
"quantization, affine fusion" }, { "50.00 percentile latency (ns)": 428591068975, "90.00 percentile latency (ns)": 629682669230, "95.00 percentile latency (ns)": 647331737377, "97.00 percentile latency (ns)": 653131602607, "99.00 percentile latency (ns)": 657707265117, "99.90 percentile latency (ns)": 659204126922, "Max latency (ns)": 659319101564, "Mean latency (ns)": 402474386417, "Min duration satisfied": "Yes", "Min latency (ns)": 5177784951, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 11179.5, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 11179.5, "characteristics.samples_per_second.normalized_per_core": 1397.4375, "characteristics.samples_per_second.normalized_per_processor": 1397.4375, "ck_system": "A100-PCIex8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 7370880, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex8_TRT", "system_name": "Gigabyte G482-Z54 (8x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 11168, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "502cbeb1e4c4e96b", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 439266480955, "90.00 percentile latency (ns)": 649121956138, "95.00 percentile latency (ns)": 667712963792, "97.00 percentile latency (ns)": 673985651870, "99.00 percentile latency (ns)": 679191601788, "99.90 percentile latency (ns)": 681107376697, "Max latency (ns)": 681243136759, "Mean latency (ns)": 414011267305, "Min duration satisfied": "Yes", "Min latency (ns)": 6021538910, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 28095.7, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 28095.7, "characteristics.samples_per_second.normalized_per_core": 3511.9625, "characteristics.samples_per_second.normalized_per_processor": 3511.9625, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, 
"performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 19140000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 29000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "4aecda975c87f0dd", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 466834918142, "90.00 percentile latency (ns)": 685666340686, "95.00 percentile latency (ns)": 705077471358, "97.00 percentile latency (ns)": 711404971710, "99.00 percentile latency (ns)": 716480242728, "99.90 percentile latency (ns)": 718217765793, "Max latency (ns)": 718377753375, "Mean latency (ns)": 438481433043, "Min duration satisfied": "Yes", "Min latency (ns)": 4234689325, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 13781.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 13781.1, "characteristics.samples_per_second.normalized_per_core": 1722.6375, "characteristics.samples_per_second.normalized_per_processor": 1722.6375, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", 
"nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 9900000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_Triton", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, TensorRT, Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "d4e999b36c89986e", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 548888252434, "90.00 percentile latency (ns)": 807110991785, "95.00 percentile latency (ns)": 829978688976, "97.00 percentile latency (ns)": 837554792077, "99.00 percentile latency (ns)": 843666402269, "99.90 percentile latency (ns)": 845793487493, "Max latency (ns)": 845952955039, "Mean latency (ns)": 515966595454, "Min duration satisfied": "Yes", "Min latency (ns)": 6446988423, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 23405.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 3457.6827423167842, "characteristics.power.normalized_per_core": 432.21034278959803, "characteristics.power.normalized_per_processor": 432.21034278959803, "characteristics.samples_per_second": 23405.6, "characteristics.samples_per_second.normalized_per_core": 2925.7, "characteristics.samples_per_second.normalized_per_processor": 2925.7, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 
11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 19800000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 30000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "04728554041749a8", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 588877201942, "90.00 percentile latency (ns)": 868025688910, "95.00 percentile latency (ns)": 892938308078, "97.00 percentile latency (ns)": 901091821895, "99.00 percentile latency (ns)": 907550209543, "99.90 percentile latency (ns)": 909743850984, "Max latency (ns)": 909962085616, "Mean latency (ns)": 552916827528, "Min duration satisfied": "Yes", "Min latency (ns)": 3432776304, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 10879.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.power": 3467.838571428575, "characteristics.power.normalized_per_core": 
433.4798214285719, "characteristics.power.normalized_per_processor": 433.4798214285719, "characteristics.samples_per_second": 10879.6, "characteristics.samples_per_second.normalized_per_core": 1359.95, "characteristics.samples_per_second.normalized_per_processor": 1359.95, "ck_system": "DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 9900000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx8_TRT_MaxQ", "system_name": "NVIDIA DGX-A100 (8x A100-SXM-80GB, MaxQ, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "15440f8869fd7237", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 402887690452, "90.00 percentile latency (ns)": 595529309051, "95.00 percentile latency (ns)": 612648738812, "97.00 percentile latency (ns)": 618346410479, "99.00 percentile latency (ns)": 622857324175, "99.90 percentile latency (ns)": 624461888224, "Max latency (ns)": 624586479764, "Mean latency (ns)": 378550885426, "Min duration satisfied": "Yes", "Min latency (ns)": 
2453516353, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 8453.59, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 8453.59, "characteristics.samples_per_second.normalized_per_core": 1056.69875, "characteristics.samples_per_second.normalized_per_processor": 1056.69875, "ck_system": "A10x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 5280000, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8000, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "891c5a6a440782e9", "use_accelerator": true, 
"weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 433112437750, "90.00 percentile latency (ns)": 640703173103, "95.00 percentile latency (ns)": 659153388443, "97.00 percentile latency (ns)": 665155478794, "99.00 percentile latency (ns)": 670039087508, "99.90 percentile latency (ns)": 671682857061, "Max latency (ns)": 671818442817, "Mean latency (ns)": 406759670132, "Min duration satisfied": "Yes", "Min latency (ns)": 1766335170, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 4181.13, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 4181.13, "characteristics.samples_per_second.normalized_per_core": 522.64125, "characteristics.samples_per_second.normalized_per_processor": 522.64125, "ck_system": "A10x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x8_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 2808960, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", 
"submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x A10, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4256, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "ad18021e4409895c", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 315628536474, "90.00 percentile latency (ns)": 568107781933, "95.00 percentile latency (ns)": 599680166640, "97.00 percentile latency (ns)": 612283019458, "99.00 percentile latency (ns)": 624863131545, "99.90 percentile latency (ns)": 630538975742, "Max latency (ns)": 631090332933, "Mean latency (ns)": 315651224659, "Min duration satisfied": "Yes", "Min latency (ns)": 301665302, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 86.8022, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "N/A", "accelerator_memory_configuration": "", "accelerator_model_name": "N/A", "accelerator_on-chip_memories": "", "accelerators_per_node": 0, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.samples_per_second": 86.8022, "characteristics.samples_per_second.normalized_per_core": 0.7750196428571429, "characteristics.samples_per_second.normalized_per_processor": 21.70055, "ck_system": "Triton_CPU_4S_8380Hx1", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "OpenVino 2021.2.200", "host_memory_capacity": "1536 GB", "host_memory_configuration": "6 slots / 32GB each / 3200 MT/s per socket", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380H CPU @ 2.90GHz", "host_processors_per_node": 4, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "fp32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 112, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/Triton_CPU_4S_8380Hx1", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS", "other_hardware": "", "other_software_stack": "Tensorflow 2.4.0, OpenVino 2021.2.200, Triton 
21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "No", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 54780, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "The original weight filename: bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "CPU Inference on Triton Inference Server", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Triton_CPU_4S_8380Hx1", "system_name": "Supermicro SYS-240P-TNRT (Cooper Lake running Triton)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 83, "task": "NLP", "task2": "nlp", "total_cores": 112, "uid": "d1d147d996656fc2", "use_accelerator": false, "weight_data_types": "int8", "weight_transformations": "We transfer the weight from int8 datatype in ONNX file to int8 datatype in OpenVino IR file." }, { "50.00 percentile latency (ns)": 42731343685, "90.00 percentile latency (ns)": 62964396104, "95.00 percentile latency (ns)": 64754804401, "97.00 percentile latency (ns)": 65360737094, "99.00 percentile latency (ns)": 65881697591, "99.90 percentile latency (ns)": 66062307170, "Max latency (ns)": 66063616827, "Mean latency (ns)": 40172377525, "Min duration satisfied": "Yes", "Min latency (ns)": 345477226, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 4046.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 4046.1, "characteristics.samples_per_second.normalized_per_core": 4046.1, "characteristics.samples_per_second.normalized_per_processor": 4046.1, "ck_system": "DGX-A100_A100-SXM4x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "open", "formal_model": "bert-99", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT Preview, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int8", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": 
"https://github.com/mlcommons/inference_results_v0.7/tree/master/open/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/open/NVIDIA/results/DGX-A100_A100-SXM4x1_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT Preview, CUDA 11.0 Update 1", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 267300, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert/bert_large_v1_1.onnx", "status": "rdi", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x1_TRT", "system_name": "NVIDIA DGX-A100 (1x A100-SXM4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4050, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "67374fd29aeb4ce6", "use_accelerator": true, "weight_data_types": "int8,fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 32025147098, "90.00 percentile latency (ns)": 57561208230, "95.00 percentile latency (ns)": 60763602390, "97.00 percentile latency (ns)": 62056399664, "99.00 percentile latency (ns)": 63342034113, "99.90 percentile latency (ns)": 63868740178, "Max latency (ns)": 63934773769, "Mean latency (ns)": 32017601081, "Min duration satisfied": "Yes", "Min latency (ns)": 329395184, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 25270.8, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 25270.8, "characteristics.samples_per_second.normalized_per_core": 3158.85, "characteristics.samples_per_second.normalized_per_processor": 3158.85, "ck_system": "DGX-A100_A100-SXM4x8_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, 
"min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x8_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1615680, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x8_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 24480, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "4fd894a1b59fc6f9", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 34623771374, "90.00 percentile latency (ns)": 61991081606, "95.00 percentile latency (ns)": 65319663459, "97.00 percentile latency (ns)": 66917919864, "99.00 percentile latency (ns)": 68255207783, "99.90 percentile latency (ns)": 68891231311, "Max latency (ns)": 68891873826, "Mean latency (ns)": 34549237919, "Min duration satisfied": "Yes", "Min latency (ns)": 631850912, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 12933.3, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 12933.3, "characteristics.samples_per_second.normalized_per_core": 1616.6625, "characteristics.samples_per_second.normalized_per_processor": 1616.6625, "ck_system": "DGX-A100_A100-SXM4x8_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD 
EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x8_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 891000, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x8_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 13500, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "e4f231012348c381", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 42659637453, "90.00 percentile latency (ns)": 62378958311, "95.00 percentile latency (ns)": 64170244018, "97.00 percentile latency (ns)": 64678640314, "99.00 percentile latency (ns)": 65063271763, "99.90 percentile latency (ns)": 65256231756, "Max latency (ns)": 65268010034, "Mean latency (ns)": 40071903500, "Min duration satisfied": "Yes", "Min latency (ns)": 2572529176, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 8898.69, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 20, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 8898.69, "characteristics.samples_per_second.normalized_per_core": 444.9345, "characteristics.samples_per_second.normalized_per_processor": 444.9345, "ck_system": "T4x20_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", 
"host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 20, "normalize_processors": 20, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x20_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 580800, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x20_TRT", "system_name": "Supermicro 6049GP-TRT-OTO-29 (20x T4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 8800, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "4776a8e13939bf6a", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 44946368073, "90.00 percentile latency (ns)": 66137803573, "95.00 percentile latency (ns)": 68063420922, "97.00 percentile latency (ns)": 68622343925, "99.00 percentile latency (ns)": 69031900264, "99.90 percentile latency (ns)": 69190594689, "Max latency (ns)": 69197363501, "Mean latency (ns)": 42257043211, "Min duration satisfied": "Yes", "Min latency (ns)": 2605202016, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 4196.69, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 20, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 4196.69, "characteristics.samples_per_second.normalized_per_core": 209.8345, "characteristics.samples_per_second.normalized_per_processor": 209.8345, "ck_system": "T4x20_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", 
"formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 20, "normalize_processors": 20, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x20_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 290400, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x20_TRT", "system_name": "Supermicro 6049GP-TRT-OTO-29 (20x T4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 4400, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "9096c28649878e15", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 45036994971, "90.00 percentile latency (ns)": 66611810772, "95.00 percentile latency (ns)": 68526230817, "97.00 percentile latency (ns)": 69188822864, "99.00 percentile latency (ns)": 69726548944, "99.90 percentile latency (ns)": 69894386829, "Max latency (ns)": 69899224922, "Mean latency (ns)": 42389163978, "Min duration satisfied": "Yes", "Min latency (ns)": 484128162, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 5665.3, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 2, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 5665.3, "characteristics.samples_per_second.normalized_per_core": 2832.65, "characteristics.samples_per_second.normalized_per_processor": 2832.65, "ck_system": "A100-PCIex2_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": 
"characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 2, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex2_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 396000, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex2_TRT", "system_name": "Gigabyte G482-Z52 (2x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 6000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "d2dbcde151bd069b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 46608809347, "90.00 percentile latency (ns)": 68931127815, "95.00 percentile latency (ns)": 70886925291, "97.00 percentile latency (ns)": 71526024171, "99.00 percentile latency (ns)": 72027566675, "99.90 percentile latency (ns)": 72200592230, "Max latency (ns)": 72200689028, "Mean latency (ns)": 43826455946, "Min duration satisfied": "Yes", "Min latency (ns)": 827345008, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 2815.49, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 2, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 2815.49, "characteristics.samples_per_second.normalized_per_core": 1407.745, "characteristics.samples_per_second.normalized_per_processor": 1407.745, "ck_system": "A100-PCIex2_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": 
"", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 2, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex2_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 203280, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex2_TRT", "system_name": "Gigabyte G482-Z52 (2x A100-PCIe, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3080, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "b497870c0a910557", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 43810849879, "90.00 percentile latency (ns)": 64389884533, "95.00 percentile latency (ns)": 66245927484, "97.00 percentile latency (ns)": 66784766675, "99.00 percentile latency (ns)": 67261674130, "99.90 percentile latency (ns)": 67424212843, "Max latency (ns)": 67424953602, "Mean latency (ns)": 41376384024, "Min duration satisfied": "Yes", "Min latency (ns)": 1849087516, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 26625.2, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 26625.2, "characteristics.samples_per_second.normalized_per_core": 3328.15, "characteristics.samples_per_second.normalized_per_processor": 3328.15, "ck_system": 
"DGX-A100_A100-SXM4x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x8_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1795200, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x8_TRT", "system_name": "NVIDIA DGX-A100 (8x A100-SXM4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 27200, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "c53945a68d4d1ab8", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 48134929850, "90.00 percentile latency (ns)": 69972524633, "95.00 percentile latency (ns)": 71867110926, "97.00 percentile latency (ns)": 72493945378, "99.00 percentile latency (ns)": 72934820171, "99.90 percentile latency (ns)": 73127981439, "Max latency (ns)": 73128351583, "Mean latency (ns)": 45059895132, "Min duration satisfied": "Yes", "Min latency (ns)": 2930214437, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 13537.8, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 13537.8, 
"characteristics.samples_per_second.normalized_per_core": 1692.225, "characteristics.samples_per_second.normalized_per_processor": 1692.225, "ck_system": "DGX-A100_A100-SXM4x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x8_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 990000, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x8_TRT", "system_name": "NVIDIA DGX-A100 (8x A100-SXM4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 15000, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "c4c2489c0c0d05d6", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 43249877819, "90.00 percentile latency (ns)": 63293167028, "95.00 percentile latency (ns)": 65024190333, "97.00 percentile latency (ns)": 65558862127, "99.00 percentile latency (ns)": 65960102718, "99.90 percentile latency (ns)": 66103151795, "Max latency (ns)": 66103217390, "Mean latency (ns)": 40593635756, "Min duration satisfied": "Yes", "Min latency (ns)": 2499897843, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 3494.53, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, 
"accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 3494.53, "characteristics.samples_per_second.normalized_per_core": 436.81625, "characteristics.samples_per_second.normalized_per_processor": 436.81625, "ck_system": "T4x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x8_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 231000, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x T4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3500, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "4838bef23f6bb4c0", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 45201227473, "90.00 percentile latency (ns)": 66214193521, "95.00 percentile latency (ns)": 68159235473, "97.00 percentile latency (ns)": 68683994942, "99.00 percentile latency (ns)": 69086459758, "99.90 percentile latency (ns)": 69156047471, "Max latency (ns)": 69156077206, "Mean latency (ns)": 42483588584, "Min duration satisfied": "Yes", "Min latency (ns)": 2666203501, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "BERT SERVER", "Samples per second": 1603.33, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", 
"accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 1603.33, "characteristics.samples_per_second.normalized_per_core": 200.41625, "characteristics.samples_per_second.normalized_per_processor": 200.41625, "ck_system": "T4x8_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x8_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 110880, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (8x T4, TensorRT)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 1680, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "8df7e250a4eb4b89", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 30874731052, "90.00 percentile latency (ns)": 55737238370, "95.00 percentile latency (ns)": 58825214801, "97.00 percentile latency (ns)": 60065430293, "99.00 percentile latency (ns)": 61353954204, "99.90 percentile latency (ns)": 61866510754, "Max latency (ns)": 61983468637, "Mean latency (ns)": 30984328407, "Min duration satisfied": "Yes", "Min latency (ns)": 578326102, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 8433.22, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", 
"accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 20, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 8433.22, "characteristics.samples_per_second.normalized_per_core": 421.66099999999994, "characteristics.samples_per_second.normalized_per_processor": 421.66099999999994, "ck_system": "T4x20_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 20, "normalize_processors": 20, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x20_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 522720, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x20_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 7920, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "fd362471390e805f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 33605558352, "90.00 percentile latency (ns)": 60715524465, "95.00 percentile latency (ns)": 63917114418, "97.00 percentile latency (ns)": 65283024883, "99.00 percentile latency (ns)": 66641225326, "99.90 percentile latency (ns)": 67373338687, "Max latency (ns)": 67409080794, "Mean latency (ns)": 33788148128, "Min duration satisfied": "Yes", "Min latency (ns)": 1198967268, "Min queries 
satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 3877.22, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 20, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 3877.22, "characteristics.samples_per_second.normalized_per_core": 193.861, "characteristics.samples_per_second.normalized_per_processor": 193.861, "ck_system": "T4x20_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 20, "normalize_processors": 20, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x20_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 261360, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x20_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3960, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "3380e9cb242c4452", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 30629595492, "90.00 percentile latency (ns)": 55228721798, "95.00 percentile latency (ns)": 58284924827, "97.00 percentile 
latency (ns)": 59508009729, "99.00 percentile latency (ns)": 60692008624, "99.90 percentile latency (ns)": 61304532794, "Max latency (ns)": 61304904245, "Mean latency (ns)": 30689566147, "Min duration satisfied": "Yes", "Min latency (ns)": 557461945, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 3391.25, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 3391.25, "characteristics.samples_per_second.normalized_per_core": 423.90625, "characteristics.samples_per_second.normalized_per_processor": 423.90625, "ck_system": "T4x8_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x8_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 207900, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 3150, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "a23ec49ae4f0992d", 
"use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 32331897402, "90.00 percentile latency (ns)": 57894041184, "95.00 percentile latency (ns)": 61488230199, "97.00 percentile latency (ns)": 62866203681, "99.00 percentile latency (ns)": 63543313637, "99.90 percentile latency (ns)": 64361193282, "Max latency (ns)": 64361316919, "Mean latency (ns)": 32386334310, "Min duration satisfied": "Yes", "Min latency (ns)": 1224417773, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 1550.5, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 1550.5, "characteristics.samples_per_second.normalized_per_core": 193.8125, "characteristics.samples_per_second.normalized_per_processor": 193.8125, "ck_system": "T4x8_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x8_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 99792, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 1512, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "4fdcb2966010a25c", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 34083599574, "90.00 percentile latency (ns)": 61533388883, "95.00 percentile latency (ns)": 64972252427, "97.00 percentile latency (ns)": 66345646822, "99.00 percentile latency (ns)": 67713119182, "99.90 percentile latency (ns)": 68322621717, "Max latency (ns)": 68399030070, "Mean latency (ns)": 34120100134, "Min duration satisfied": "Yes", "Min latency (ns)": 96961887, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 5210.6, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 2, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 5210.6, "characteristics.samples_per_second.normalized_per_core": 2605.3, "characteristics.samples_per_second.normalized_per_processor": 2605.3, "ck_system": "A100-PCIex2_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 2, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex2_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 356400, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": 
"bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex2_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 5400, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "28dab154149e2823", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 33966899701, "90.00 percentile latency (ns)": 61104193400, "95.00 percentile latency (ns)": 64574404227, "97.00 percentile latency (ns)": 65928190886, "99.00 percentile latency (ns)": 67264447196, "99.90 percentile latency (ns)": 67855058155, "Max latency (ns)": 67859506077, "Mean latency (ns)": 33928706373, "Min duration satisfied": "Yes", "Min latency (ns)": 191796088, "Min queries satisfied": "Yes", "Mode": "Performance", "Result is": "VALID", "SUT name": "Triton_Server", "Samples per second": 2694.1, "Scenario": "offline", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 2, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.samples_per_second": 2694.1, "characteristics.samples_per_second.normalized_per_core": 1347.05, "characteristics.samples_per_second.normalized_per_processor": 1347.05, "ck_system": "A100-PCIex2_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_x_maximize": true, "dim_y_default": "characteristics.samples_per_second", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.9, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99.9", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1, "mlperf_version": 0.7, "normalize_cores": 2, "normalize_processors": 2, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex2_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, 
"performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 182820, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex2_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "datacenter", "target_latency (ns)": 0, "target_qps": 2770, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "b2a455c9a39f271e", "use_accelerator": true, "weight_data_types": "fp16", "weight_transformations": "quantization, affine fusion" } ]