[ { "50.00 percentile latency (ns)": 2077997, "90.00 percentile latency (ns)": 2279136, "90th percentile latency (ns)": 2279136, "95.00 percentile latency (ns)": 2718198, "97.00 percentile latency (ns)": 2803388, "99.00 percentile latency (ns)": 4570326, "99.90 percentile latency (ns)": 4924855, "Max latency (ns)": 15824744, "Mean latency (ns)": 2197059, "Min duration satisfied": "Yes", "Min latency (ns)": 1963534, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 448.92, "QPS w/o loadgen overhead": 455.15, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.279136, "characteristics.90th_percentile_latency_ns": 2279136.0, "characteristics.90th_percentile_latency_s": 0.002279136, "characteristics.90th_percentile_latency_us": 2279.136, "ck_system": "A30x1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", 
"submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x1_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A30, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 294.118, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "1c3902f4e8bafa7b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 43083284, "90.00 percentile latency (ns)": 57588379, "90th percentile latency (ns)": 57588379, "95.00 percentile latency (ns)": 59317452, "97.00 percentile latency (ns)": 61188631, "99.00 percentile latency (ns)": 61593569, "99.90 percentile latency (ns)": 61668034, "Max latency (ns)": 61855773, "Mean latency (ns)": 43549746, "Min duration satisfied": "Yes", "Min latency (ns)": 17000364, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 22.95, "QPS w/o loadgen overhead": 22.96, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 57.588379, "characteristics.90th_percentile_latency_ns": 57588379.0, "characteristics.90th_percentile_latency_s": 0.057588379, "characteristics.90th_percentile_latency_us": 57588.379, "characteristics.power": 0.5684125137438257, "characteristics.power.normalized_per_core": 0.5684125137438257, "characteristics.power.normalized_per_processor": 0.5684125137438257, "ck_system": "Xavier_NX_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": "8 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": 
"https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/Xavier_NX_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT_MaxQ", "system_name": "Auvidea JNX30 Xavier NX (MaxQ, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "4fa59e174409938e", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 20796508, "90.00 percentile latency (ns)": 30667184, "90th percentile latency (ns)": 30667184, "95.00 percentile latency (ns)": 31752996, "97.00 percentile latency (ns)": 32638702, "99.00 percentile latency (ns)": 33187688, "99.90 percentile latency (ns)": 33714400, "Max latency (ns)": 37157797, "Mean latency (ns)": 22045684, "Min duration satisfied": "Yes", "Min latency (ns)": 10147522, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 45.32, "QPS w/o loadgen overhead": 45.36, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 30.667184, "characteristics.90th_percentile_latency_ns": 30667184.0, "characteristics.90th_percentile_latency_s": 0.030667184, "characteristics.90th_percentile_latency_us": 30667.184, "ck_system": "AGX_Xavier_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": "32 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, 
"host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT_Triton", "system_name": "NVIDIA Jetson AGX Xavier 32GB (TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "802da2914f8db545", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1575472, "90.00 percentile latency (ns)": 1683101, "90th percentile latency (ns)": 1683101, "95.00 percentile latency (ns)": 1930320, "97.00 percentile latency (ns)": 1968139, "99.00 percentile latency (ns)": 3642631, "99.90 percentile latency (ns)": 3874759, "Max latency (ns)": 12800032, "Mean latency (ns)": 1654417, "Min duration satisfied": "Yes", "Min latency (ns)": 1479182, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 594.84, "QPS w/o loadgen overhead": 604.44, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.683101, "characteristics.90th_percentile_latency_ns": 1683101.0, "characteristics.90th_percentile_latency_s": 0.001683101, "characteristics.90th_percentile_latency_us": 1683.101, "ck_system": "A100-PCIex1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", 
"dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "a22a6a7c86558184", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 20111771, "90.00 percentile latency (ns)": 29940592, "90th percentile latency (ns)": 29940592, "95.00 percentile latency (ns)": 30923390, "97.00 percentile latency (ns)": 31870634, "99.00 percentile latency (ns)": 32365411, "99.90 percentile latency (ns)": 32729013, "Max latency (ns)": 33167291, "Mean latency (ns)": 21332544, "Min duration satisfied": "Yes", "Min latency (ns)": 9594231, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 46.81, "QPS w/o loadgen overhead": 46.88, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", 
"accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 29.940592, "characteristics.90th_percentile_latency_ns": 29940592.0, "characteristics.90th_percentile_latency_s": 0.029940592, "characteristics.90th_percentile_latency_us": 29940.592, "ck_system": "AGX_Xavier_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": "32 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT", "system_name": "NVIDIA Jetson AGX Xavier 32GB (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "83a444d853ef8543", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 4607315, "90.00 percentile latency (ns)": 5450229, "90th percentile latency (ns)": 5450229, "95.00 percentile latency (ns)": 
15458830, "97.00 percentile latency (ns)": 15883003, "99.00 percentile latency (ns)": 16691421, "99.90 percentile latency (ns)": 16904915, "Max latency (ns)": 29635064, "Mean latency (ns)": 5324926, "Min duration satisfied": "Yes", "Min latency (ns)": 3538323, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 187.56, "QPS w/o loadgen overhead": 187.8, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 5.450229, "characteristics.90th_percentile_latency_ns": 5450229.0, "characteristics.90th_percentile_latency_s": 0.005450229, "characteristics.90th_percentile_latency_us": 5450.229, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "48bb943c44433c7f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1427818, "90.00 percentile latency (ns)": 1598367, "90th percentile latency (ns)": 1598367, "95.00 percentile latency (ns)": 1715875, "97.00 percentile latency (ns)": 1779445, "99.00 percentile latency (ns)": 1789295, "99.90 percentile latency (ns)": 1834555, "Max latency (ns)": 8377128, "Mean latency (ns)": 1470697, "Min duration satisfied": "Yes", "Min latency (ns)": 1316468, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 666.02, "QPS w/o loadgen overhead": 679.95, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.598367, "characteristics.90th_percentile_latency_ns": 1598367.0, "characteristics.90th_percentile_latency_s": 0.001598367, "characteristics.90th_percentile_latency_us": 1598.367, "ck_system": "A100-PCIe-80GBx1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 
8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx1_TRT", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe-80GB, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "8774b58973ca1019", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1438017, "90.00 percentile latency (ns)": 1563472, "90th percentile latency (ns)": 1563472, "95.00 percentile latency (ns)": 1666795, "97.00 percentile latency (ns)": 1746745, "99.00 percentile latency (ns)": 1755682, "99.90 percentile latency (ns)": 1760702, "Max latency (ns)": 4457358, "Mean latency (ns)": 1466009, "Min duration satisfied": "Yes", "Min latency (ns)": 1339281, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 679.11, "QPS w/o loadgen overhead": 682.12, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.563472, "characteristics.90th_percentile_latency_ns": 1563472.0, "characteristics.90th_percentile_latency_s": 0.001563472, "characteristics.90th_percentile_latency_us": 1563.472, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, 
"mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "b9d14c6db3325636", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 36676517, "90.00 percentile latency (ns)": 47711320, "90th percentile latency (ns)": 47711320, "95.00 percentile latency (ns)": 50253266, "97.00 percentile latency (ns)": 50954943, "99.00 percentile latency (ns)": 52478290, "99.90 percentile latency (ns)": 55119916, "Max latency (ns)": 60423903, "Mean latency (ns)": 37382247, "Min duration satisfied": "Yes", "Min latency (ns)": 14176785, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 26.74, "QPS w/o loadgen overhead": 26.75, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 47.71132, "characteristics.90th_percentile_latency_ns": 47711320.0, "characteristics.90th_percentile_latency_s": 0.04771132, "characteristics.90th_percentile_latency_us": 47711.32, "ck_system": "Xavier_NX_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": 
"8 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/Xavier_NX_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT_Triton", "system_name": "NVIDIA Jetson Xavier NX (TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "21f746474a6e9699", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 4013584, "90.00 percentile latency (ns)": 5775265, "90th percentile latency (ns)": 5775265, "95.00 percentile latency (ns)": 7564047, "97.00 percentile latency (ns)": 7733587, "99.00 percentile latency (ns)": 7827379, "99.90 percentile latency (ns)": 7844598, "Max latency (ns)": 9856422, "Mean latency (ns)": 4469691, "Min duration satisfied": "Yes", "Min latency (ns)": 2927503, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 222.08, "QPS w/o loadgen overhead": 223.73, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", 
"characteristics.90th_percentile_latency_ms": 5.775265, "characteristics.90th_percentile_latency_ns": 5775265.0, "characteristics.90th_percentile_latency_s": 0.005775265, "characteristics.90th_percentile_latency_us": 5775.265, "ck_system": "A30-MIG_1x1g.6gb_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.6gb_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.6gb_TRT", "system_name": "Gigabyte G482-Z54 (1x A30-MIG-1x1g.6gb, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 166.683, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "5f40e2b9ed7d91aa", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 29156519, "90.00 percentile latency (ns)": 43663653, "90th percentile latency (ns)": 43663653, "95.00 percentile latency (ns)": 45250198, "97.00 percentile latency (ns)": 47095892, "99.00 percentile latency (ns)": 47622076, "99.90 percentile latency (ns)": 47869275, "Max latency (ns)": 51613604, "Mean latency (ns)": 30698436, "Min duration satisfied": "Yes", "Min latency (ns)": 13609068, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 32.55, "QPS 
w/o loadgen overhead": 32.57, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 43.663653, "characteristics.90th_percentile_latency_ns": 43663653.0, "characteristics.90th_percentile_latency_s": 0.043663653, "characteristics.90th_percentile_latency_us": 43663.653, "characteristics.power": 0.6106034165819225, "characteristics.power.normalized_per_core": 0.6106034165819225, "characteristics.power.normalized_per_processor": 0.6106034165819225, "ck_system": "AGX_Xavier_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": "32 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT_MaxQ", "system_name": "Auvidea X220-LC AGX Xavier 32GB (MaxQ, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "d48159dc0bdd6926", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1570005, "90.00 percentile latency (ns)": 1677006, "90th percentile latency (ns)": 1677006, "95.00 percentile latency (ns)": 1783804, "97.00 percentile latency (ns)": 1861694, "99.00 percentile latency (ns)": 3576306, "99.90 percentile latency (ns)": 3986154, "Max latency (ns)": 11452729, "Mean latency (ns)": 1638905, "Min duration satisfied": "Yes", "Min latency (ns)": 1463027, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 599.17, "QPS w/o loadgen overhead": 610.16, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.677006, "characteristics.90th_percentile_latency_ns": 1677006.0, "characteristics.90th_percentile_latency_s": 0.001677006, "characteristics.90th_percentile_latency_us": 1677.006, "ck_system": "A100-PCIe-80GBx1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIe-80GBx1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, 
"performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIe-80GBx1_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe-80GB, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "39ad286694e4d9bb", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2503946, "90.00 percentile latency (ns)": 2640315, "90th percentile latency (ns)": 2640315, "95.00 percentile latency (ns)": 3226971, "97.00 percentile latency (ns)": 3321155, "99.00 percentile latency (ns)": 5347803, "99.90 percentile latency (ns)": 5783373, "Max latency (ns)": 14176186, "Mean latency (ns)": 2609821, "Min duration satisfied": "Yes", "Min latency (ns)": 2264095, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 372.3, "QPS w/o loadgen overhead": 383.17, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.640315, "characteristics.90th_percentile_latency_ns": 2640315.0, "characteristics.90th_percentile_latency_s": 0.002640315, "characteristics.90th_percentile_latency_us": 2640.315, "ck_system": "A10x1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", 
"nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x1_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (1x A10, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 357.143, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "546c8889c37d35c2", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1537954, "90.00 percentile latency (ns)": 1594379, "90th percentile latency (ns)": 1594379, "95.00 percentile latency (ns)": 1708984, "97.00 percentile latency (ns)": 1779286, "99.00 percentile latency (ns)": 2785172, "99.90 percentile latency (ns)": 2934833, "Max latency (ns)": 9640928, "Mean latency (ns)": 1581057, "Min duration satisfied": "Yes", "Min latency (ns)": 1463083, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 630.45, "QPS w/o loadgen overhead": 632.49, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.594379, "characteristics.90th_percentile_latency_ns": 1594379.0, "characteristics.90th_percentile_latency_s": 0.001594379, "characteristics.90th_percentile_latency_us": 1594.379, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": 
"", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_Triton", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "e23461ec464ba2e1", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2304997, "90.00 percentile latency (ns)": 2603425, "90th percentile latency (ns)": 2603425, "95.00 percentile latency (ns)": 3199166, "97.00 percentile latency (ns)": 3244633, "99.00 percentile latency (ns)": 3298979, "99.90 percentile latency (ns)": 3387339, "Max latency (ns)": 14474159, "Mean latency (ns)": 2389140, "Min duration satisfied": "Yes", "Min latency (ns)": 2039663, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 407.67, "QPS w/o loadgen overhead": 418.56, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.603425, "characteristics.90th_percentile_latency_ns": 2603425.0, "characteristics.90th_percentile_latency_s": 0.002603425, 
"characteristics.90th_percentile_latency_us": 2603.425, "ck_system": "A10x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A10x1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x1_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (1x A10, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 357.143, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "c4b744cefbc81306", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1839538, "90.00 percentile latency (ns)": 2227984, "90th percentile latency (ns)": 2227984, "95.00 percentile latency (ns)": 2660727, "97.00 percentile latency (ns)": 2733258, "99.00 percentile latency (ns)": 2744967, "99.90 percentile latency (ns)": 2793068, "Max latency (ns)": 10654519, "Mean latency (ns)": 1944131, "Min duration satisfied": "Yes", "Min latency (ns)": 1708988, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 507.56, "QPS w/o loadgen overhead": 514.37, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", 
"accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.227984, "characteristics.90th_percentile_latency_ns": 2227984.0, "characteristics.90th_percentile_latency_s": 0.002227984, "characteristics.90th_percentile_latency_us": 2227.984, "ck_system": "A30x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30x1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x1_TRT", "system_name": "Gigabyte G482-Z54 (1x A30, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "9e5d09048debab96", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 3852237, "90.00 percentile latency (ns)": 5389834, "90th percentile latency (ns)": 5389834, "95.00 percentile latency (ns)": 6560529, "97.00 
percentile latency (ns)": 6797447, "99.00 percentile latency (ns)": 6889332, "99.90 percentile latency (ns)": 6898088, "Max latency (ns)": 9207466, "Mean latency (ns)": 4178918, "Min duration satisfied": "Yes", "Min latency (ns)": 2716458, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 238.88, "QPS w/o loadgen overhead": 239.3, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 5.389834, "characteristics.90th_percentile_latency_ns": 5389834.0, "characteristics.90th_percentile_latency_s": 0.005389834, "characteristics.90th_percentile_latency_us": 5389.834, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", 
"system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "system_name": "NVIDIA DGX A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "c0c5d58e33ce4be1", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 4763623, "90.00 percentile latency (ns)": 5827756, "90th percentile latency (ns)": 5827756, "95.00 percentile latency (ns)": 11248649, "97.00 percentile latency (ns)": 11419360, "99.00 percentile latency (ns)": 11535678, "99.90 percentile latency (ns)": 11576117, "Max latency (ns)": 18709664, "Mean latency (ns)": 5321563, "Min duration satisfied": "Yes", "Min latency (ns)": 3737892, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 186.98, "QPS w/o loadgen overhead": 187.91, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.6gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 5.827756, "characteristics.90th_percentile_latency_ns": 5827756.0, "characteristics.90th_percentile_latency_s": 0.005827756, "characteristics.90th_percentile_latency_us": 5827.756, "ck_system": "A30-MIG_1x1g.6gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.6gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0, Triton 21.07", "performance_issue_same": 0, 
"performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.6gb_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A30-MIG-1x1g.6gb, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 166.683, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "f1ca58f39c4479b7", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1453172, "90.00 percentile latency (ns)": 1620981, "90th percentile latency (ns)": 1620981, "95.00 percentile latency (ns)": 1871060, "97.00 percentile latency (ns)": 1887859, "99.00 percentile latency (ns)": 1895110, "99.90 percentile latency (ns)": 1939490, "Max latency (ns)": 15928874, "Mean latency (ns)": 1501546, "Min duration satisfied": "Yes", "Min latency (ns)": 1350612, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 654.18, "QPS w/o loadgen overhead": 665.98, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.620981, "characteristics.90th_percentile_latency_ns": 1620981.0, "characteristics.90th_percentile_latency_s": 0.001620981, "characteristics.90th_percentile_latency_us": 1620.981, "ck_system": "A100-PCIex1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 8.0.1, CUDA 11.3", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", 
"nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 20.04.4", "other_hardware": "", "other_software_stack": "TensorRT 8.0.1, CUDA 11.3, cuDNN 8.2.1, Driver 470.42.01, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "20e9e4473abc303c", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 35424010, "90.00 percentile latency (ns)": 46411546, "90th percentile latency (ns)": 46411546, "95.00 percentile latency (ns)": 47983549, "97.00 percentile latency (ns)": 49908420, "99.00 percentile latency (ns)": 50469279, "99.90 percentile latency (ns)": 50562209, "Max latency (ns)": 51254743, "Mean latency (ns)": 35766873, "Min duration satisfied": "Yes", "Min latency (ns)": 13627941, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 27.95, "QPS w/o loadgen overhead": 27.96, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 46.411546, "characteristics.90th_percentile_latency_ns": 46411546.0, "characteristics.90th_percentile_latency_s": 0.046411546, "characteristics.90th_percentile_latency_us": 46411.546, "ck_system": "Xavier_NX_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2", "host_memory_capacity": "8 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", 
"host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline scenario", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.1, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.1/tree/master/closed/NVIDIA/results/Xavier_NX_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04", "other_hardware": "", "other_software_stack": "JetPack 4.6, TensorRT 8.0.1, CUDA 10.2, cuDNN 8.2.3, DALI 0.31.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 1624344308455410291, "retraining": "No", "sample_index_rng_seed": 517984244576520566, "samples_per_query": 1, "schedule_rng_seed": 10051496985653635065, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT", "system_name": "NVIDIA Jetson Xavier NX (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "63771489b8d09857", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2342132, "90.00 percentile latency (ns)": 2561073, "90th percentile latency (ns)": 2561073, "95.00 percentile latency (ns)": 2776712, "97.00 percentile latency (ns)": 2862862, "99.00 percentile latency (ns)": 4568150, "99.90 percentile latency (ns)": 4760230, "Max latency (ns)": 23197697, "Mean latency (ns)": 2435946, "Min duration satisfied": "Yes", "Min latency (ns)": 2199473, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 405.65, "QPS w/o loadgen overhead": 410.52, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.561073, "characteristics.90th_percentile_latency_ns": 2561073.0, "characteristics.90th_percentile_latency_s": 
0.002561073, "characteristics.90th_percentile_latency_us": 2561.073, "ck_system": "A30x1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x1_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A30, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 294.118, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "2cd4111af925a730", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 43130568, "90.00 percentile latency (ns)": 57538713, "90th percentile latency (ns)": 57538713, "95.00 percentile latency (ns)": 59828858, "97.00 percentile latency (ns)": 61363920, "99.00 percentile latency (ns)": 61648627, "99.90 percentile latency (ns)": 61770524, "Max latency (ns)": 65071180, "Mean latency (ns)": 43650676, "Min duration satisfied": "Yes", "Min latency (ns)": 17150021, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 22.9, "QPS w/o loadgen overhead": 22.91, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", 
"accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 57.538713, "characteristics.90th_percentile_latency_ns": 57538713.0, "characteristics.90th_percentile_latency_s": 0.057538713, "characteristics.90th_percentile_latency_us": 57538.713, "characteristics.power": 0.58970071396542, "characteristics.power.normalized_per_core": 0.58970071396542, "characteristics.power.normalized_per_processor": 0.58970071396542, "ck_system": "Xavier_NX_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2", "host_memory_capacity": "8 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/Xavier_NX_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2, cuDNN 8.0.0, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT_MaxQ", "system_name": "NVIDIA Jetson Xavier NX (MaxQ, 
TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "b00592be7e4ad495", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1700211, "90.00 percentile latency (ns)": 1779320, "90th percentile latency (ns)": 1779320, "95.00 percentile latency (ns)": 1843966, "97.00 percentile latency (ns)": 1918806, "99.00 percentile latency (ns)": 3649165, "99.90 percentile latency (ns)": 4184002, "Max latency (ns)": 12371107, "Mean latency (ns)": 1762910, "Min duration satisfied": "Yes", "Min latency (ns)": 1588270, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 558.62, "QPS w/o loadgen overhead": 567.24, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.77932, "characteristics.90th_percentile_latency_ns": 1779320.0, "characteristics.90th_percentile_latency_s": 0.00177932, "characteristics.90th_percentile_latency_us": 1779.32, "ck_system": "A100-PCIex1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, 
"qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT_Triton", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "7f86bf9253c92f8d", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 20609609, "90.00 percentile latency (ns)": 31091144, "90th percentile latency (ns)": 31091144, "95.00 percentile latency (ns)": 32101435, "97.00 percentile latency (ns)": 32498352, "99.00 percentile latency (ns)": 32832193, "99.90 percentile latency (ns)": 33199091, "Max latency (ns)": 33466114, "Mean latency (ns)": 21813724, "Min duration satisfied": "Yes", "Min latency (ns)": 9722519, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 45.78, "QPS w/o loadgen overhead": 45.84, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 31.091144, "characteristics.90th_percentile_latency_ns": 31091144.0, "characteristics.90th_percentile_latency_s": 0.031091144, "characteristics.90th_percentile_latency_us": 31091.144, "ck_system": "AGX_Xavier_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2", "host_memory_capacity": "32 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, 
"normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2, cuDNN 8.0.0, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT", "system_name": "NVIDIA Jetson AGX Xavier 32GB (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "7a24264d2382e44c", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1565562, "90.00 percentile latency (ns)": 1708807, "90th percentile latency (ns)": 1708807, "95.00 percentile latency (ns)": 1768102, "97.00 percentile latency (ns)": 1823835, "99.00 percentile latency (ns)": 1832915, "99.90 percentile latency (ns)": 1840946, "Max latency (ns)": 16022567, "Mean latency (ns)": 1593010, "Min duration satisfied": "Yes", "Min latency (ns)": 1472255, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 622.45, "QPS w/o loadgen overhead": 627.74, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.708807, "characteristics.90th_percentile_latency_ns": 1708807.0, "characteristics.90th_percentile_latency_s": 0.001708807, "characteristics.90th_percentile_latency_us": 1708.807, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_edge", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, 
"host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_edge", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_edge", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "2b4e5da9ebfecbec", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 5518167, "90.00 percentile latency (ns)": 6460815, "90th percentile latency (ns)": 6460815, "95.00 percentile latency (ns)": 6867808, "97.00 percentile latency (ns)": 10179218, "99.00 percentile latency (ns)": 10221878, "99.90 percentile latency (ns)": 10286238, "Max latency (ns)": 14062900, "Mean latency (ns)": 5686014, "Min duration satisfied": "Yes", "Min latency (ns)": 4592081, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 175.7, "QPS w/o loadgen overhead": 175.87, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 6.460815, 
"characteristics.90th_percentile_latency_ns": 6460815.0, "characteristics.90th_percentile_latency_s": 0.006460815, "characteristics.90th_percentile_latency_us": 6460.815, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT_Triton", "system_name": "NVIDIA DGX-A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "9ad942bf7540d7e4", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 34734823, "90.00 percentile latency (ns)": 53540217, "90th percentile latency (ns)": 53540217, "95.00 percentile latency (ns)": 56157917, "97.00 percentile latency (ns)": 57284068, "99.00 percentile latency (ns)": 57527970, "99.90 percentile latency (ns)": 57721066, "Max latency (ns)": 58017480, "Mean latency (ns)": 36570694, "Min duration satisfied": "Yes", "Min latency (ns)": 15348224, "Min queries satisfied": "Yes", "Mode": 
"PerformanceOnly", "QPS w/ loadgen overhead": 27.33, "QPS w/o loadgen overhead": 27.34, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 53.540217, "characteristics.90th_percentile_latency_ns": 53540217.0, "characteristics.90th_percentile_latency_s": 0.053540217, "characteristics.90th_percentile_latency_us": 53540.217, "characteristics.power": 0.6719735394359012, "characteristics.power.normalized_per_core": 0.6719735394359012, "characteristics.power.normalized_per_processor": 0.6719735394359012, "ck_system": "AGX_Xavier_TRT_MaxQ", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2", "host_memory_capacity": "32 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT_MaxQ", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2, cuDNN 8.0.0, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT_MaxQ", "system_name": "NVIDIA Jetson AGX Xavier 32GB (MaxQ, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "4bc69497d93ae498", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 5923459, "90.00 percentile latency (ns)": 7542141, "90th percentile latency (ns)": 7542141, "95.00 percentile latency (ns)": 7779103, "97.00 percentile latency (ns)": 8009820, "99.00 percentile latency (ns)": 8088236, "99.90 percentile latency (ns)": 8107924, "Max latency (ns)": 11332632, "Mean latency (ns)": 6041106, "Min duration satisfied": "Yes", "Min latency (ns)": 4762893, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 164.48, "QPS w/o loadgen overhead": 165.53, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.3gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 7.542141, "characteristics.90th_percentile_latency_ns": 7542141.0, "characteristics.90th_percentile_latency_s": 0.007542141, "characteristics.90th_percentile_latency_us": 7542.141, "ck_system": "A30-MIG_1x1g.3gb_TRT", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.3gb_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": 
"TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.3gb_TRT", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 134.177, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "16f92689764f3683", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2785527, "90.00 percentile latency (ns)": 2897452, "90th percentile latency (ns)": 2897452, "95.00 percentile latency (ns)": 2949598, "97.00 percentile latency (ns)": 3015670, "99.00 percentile latency (ns)": 5014987, "99.90 percentile latency (ns)": 5357748, "Max latency (ns)": 15118054, "Mean latency (ns)": 2845582, "Min duration satisfied": "Yes", "Min latency (ns)": 2537790, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 342.0, "QPS w/o loadgen overhead": 351.42, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.897452, "characteristics.90th_percentile_latency_ns": 2897452.0, "characteristics.90th_percentile_latency_s": 0.002897452, "characteristics.90th_percentile_latency_us": 2897.452, "ck_system": "A10x1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, 
"max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x1_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x1_TRT_Triton", "system_name": "Supermicro 4029GP-TRT-OTO-28 (1x A10, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 357.143, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "7514dafa58994410", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1644754, "90.00 percentile latency (ns)": 1725456, "90th percentile latency (ns)": 1725456, "95.00 percentile latency (ns)": 1786831, "97.00 percentile latency (ns)": 1837796, "99.00 percentile latency (ns)": 2878067, "99.90 percentile latency (ns)": 3040051, "Max latency (ns)": 7705278, "Mean latency (ns)": 1685459, "Min duration satisfied": "Yes", "Min latency (ns)": 1577689, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 591.09, "QPS w/o loadgen overhead": 593.31, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.725456, "characteristics.90th_percentile_latency_ns": 1725456.0, "characteristics.90th_percentile_latency_s": 0.001725456, "characteristics.90th_percentile_latency_us": 1725.456, "ck_system": "DGX-A100_A100-SXM-80GBx1_TRT_Triton_edge", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, 
"formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GBx1_TRT_Triton_edge", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0, Triton 21.02; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GBx1_TRT_Triton_edge", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "d8a279eaf954ccc9", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2613784, "90.00 percentile latency (ns)": 2839189, "90th percentile latency (ns)": 2839189, "95.00 percentile latency (ns)": 2898156, "97.00 percentile latency (ns)": 2926023, "99.00 percentile latency (ns)": 2972269, "99.90 percentile latency (ns)": 3078020, "Max latency (ns)": 10959812, "Mean latency (ns)": 2641242, "Min duration satisfied": "Yes", "Min latency (ns)": 2369217, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 369.67, "QPS w/o loadgen overhead": 378.61, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA A10", 
"accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.839189, "characteristics.90th_percentile_latency_ns": 2839189.0, "characteristics.90th_percentile_latency_s": 0.002839189, "characteristics.90th_percentile_latency_us": 2839.189, "ck_system": "A10x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A10x1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A10x1_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (1x A10, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 357.143, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "34890925ab40419e", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 2137813, "90.00 percentile latency (ns)": 2536262, "90th percentile latency (ns)": 2536262, "95.00 percentile latency (ns)": 2667831, "97.00 percentile latency (ns)": 2805650, "99.00 percentile latency (ns)": 2846000, "99.90 percentile latency (ns)": 2866361, "Max latency (ns)": 9113280, "Mean latency 
(ns)": 2211811, "Min duration satisfied": "Yes", "Min latency (ns)": 1987873, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 446.47, "QPS w/o loadgen overhead": 452.12, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 2.536262, "characteristics.90th_percentile_latency_ns": 2536262.0, "characteristics.90th_percentile_latency_s": 0.002536262, "characteristics.90th_percentile_latency_us": 2536.262, "ck_system": "A30x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30x1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30x1_TRT", "system_name": "Gigabyte G482-Z54 (1x A30, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", 
"total_cores": 128, "uid": "9b33162d14d979c1", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 12596033, "90.00 percentile latency (ns)": 14210247, "90th percentile latency (ns)": 14210247, "95.00 percentile latency (ns)": 14471723, "97.00 percentile latency (ns)": 14702248, "99.00 percentile latency (ns)": 14836327, "99.90 percentile latency (ns)": 14874617, "Max latency (ns)": 25200337, "Mean latency (ns)": 12682713, "Min duration satisfied": "Yes", "Min latency (ns)": 5173755, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 78.62, "QPS w/o loadgen overhead": 78.85, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A30 (1x1g.3gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 14.210247, "characteristics.90th_percentile_latency_ns": 14210247.0, "characteristics.90th_percentile_latency_s": 0.014210247, "characteristics.90th_percentile_latency_us": 14210.247, "ck_system": "A30-MIG_1x1g.3gb_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A30-MIG_1x1g.3gb_TRT_Triton", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.46, DALI 0.30.0, Triton 21.02; GCC 7.5.0; Python 3.7.10", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, 
"problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "preview", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A30-MIG_1x1g.3gb_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 70.2801, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "436eb94b0a5499de", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 4641744, "90.00 percentile latency (ns)": 6371598, "90th percentile latency (ns)": 6371598, "95.00 percentile latency (ns)": 6467618, "97.00 percentile latency (ns)": 6578386, "99.00 percentile latency (ns)": 6618431, "99.90 percentile latency (ns)": 6628529, "Max latency (ns)": 8655130, "Mean latency (ns)": 4777292, "Min duration satisfied": "Yes", "Min latency (ns)": 3705278, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 208.99, "QPS w/o loadgen overhead": 209.32, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "80 GB", "accelerator_memory_configuration": "HBM2e", "accelerator_model_name": "NVIDIA A100-SXM-80GB (1x1g.10gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 6.371598, "characteristics.90th_percentile_latency_ns": 6371598.0, "characteristics.90th_percentile_latency_s": 0.006371598, "characteristics.90th_percentile_latency_us": 6371.598, "ck_system": "DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "2 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": 
"https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM-80GB-MIG_1x1g.10gb_TRT", "system_name": "NVIDIA DGX-A100 (1x A100-SXM-80GB-MIG-1x1g.10gb, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "4d58e4c4e942f001", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1587588, "90.00 percentile latency (ns)": 1728887, "90th percentile latency (ns)": 1728887, "95.00 percentile latency (ns)": 1789653, "97.00 percentile latency (ns)": 1835843, "99.00 percentile latency (ns)": 1848947, "99.90 percentile latency (ns)": 2708762, "Max latency (ns)": 8936093, "Mean latency (ns)": 1617308, "Min duration satisfied": "Yes", "Min latency (ns)": 1479985, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 606.84, "QPS w/o loadgen overhead": 618.31, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40 GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe-40GB", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 1.728887, "characteristics.90th_percentile_latency_ns": 1728887.0, "characteristics.90th_percentile_latency_s": 0.001728887, "characteristics.90th_percentile_latency_us": 1728.887, "ck_system": "A100-PCIex1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2.3, CUDA 11.1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", 
"host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "TensorRT 7.2.3, CUDA 11.1, cuDNN 8.1.1, Driver 460.32.03, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT", "system_name": "Gigabyte G482-Z54 (1x A100-PCIe, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "5f3049f4424b8304", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 37371589, "90.00 percentile latency (ns)": 48929147, "90th percentile latency (ns)": 48929147, "95.00 percentile latency (ns)": 51050583, "97.00 percentile latency (ns)": 52782367, "99.00 percentile latency (ns)": 53079807, "99.90 percentile latency (ns)": 53179914, "Max latency (ns)": 54517860, "Mean latency (ns)": 38018507, "Min duration satisfied": "Yes", "Min latency (ns)": 14578089, "Min queries satisfied": "Yes", "Mode": "PerformanceOnly", "QPS w/ loadgen overhead": 26.29, "QPS w/o loadgen overhead": 26.3, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "boot_firmware_version": "", "characteristics.90th_percentile_latency_ms": 48.929147, "characteristics.90th_percentile_latency_ns": 48929147.0, "characteristics.90th_percentile_latency_s": 0.048929147, "characteristics.90th_percentile_latency_us": 48929.147, "ck_system": "Xavier_NX_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", 
"dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "disk_controllers": "", "disk_drives": "", "division": "closed", "filesystem": "", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2", "host_memory_capacity": "8 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "management_firmware_version": "", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 600000, "min_query_count": 1024, "mlperf_version": 1.0, "network_speed_mbit": "", "nics_enabled_connected": "", "nics_enabled_firmware": "", "nics_enabled_os": "", "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v1.0/tree/master/closed/NVIDIA/results/Xavier_NX_TRT", "number_of_nodes": 1, "number_of_type_nics_installed": "", "operating_system": "Ubuntu 18.04.4", "other_hardware": "", "other_software_stack": "21.03 Jetson CUDA-X AI Developer Preview, TensorRT 7.2.3, CUDA 10.2, cuDNN 8.0.0, DALI 0.30.0", "performance_issue_same": 0, "performance_issue_same_index": 0, "performance_issue_unique": 0, "performance_sample_count": 10833, "power_management": "", "power_supply_details": "", "power_supply_quantity_and_rating_watts": "", "print_timestamps": 0, "problem": false, "qsl_rng_seed": 7322528924094909334, "retraining": "N", "sample_index_rng_seed": 1570999273408051088, "samples_per_query": 1, "schedule_rng_seed": 3507442325620259414, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT", "system_name": "NVIDIA Jetson Xavier NX (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "9a72491452e8891f", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1593159, "90.00 percentile latency (ns)": 1741417, "90th percentile latency (ns)": 1741417, "95.00 percentile latency (ns)": 1817289, "97.00 percentile latency (ns)": 1856713, "99.00 percentile latency (ns)": 1868495, "99.90 percentile latency (ns)": 1924961, "Max latency (ns)": 5953889, "Mean latency (ns)": 1622671, "Min duration satisfied": "Yes", "Min latency (ns)": 1500284, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 613.73, "QPS w/o loadgen overhead": 616.27, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", 
"accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 1.741417, "characteristics.90th_percentile_latency_ns": 1741417.0, "characteristics.90th_percentile_latency_s": 0.001741417, "characteristics.90th_percentile_latency_us": 1741.417, "ck_system": "DGX-A100_A100-SXM4x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x1_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x1_TRT", "system_name": "NVIDIA DGX-A100 (1x A100-SXM4, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "af45078950534497", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 9192581, "90.00 percentile latency (ns)": 10992868, "90th percentile latency (ns)": 10992868, "95.00 percentile latency (ns)": 11096462, "97.00 percentile latency (ns)": 11242276, "99.00 percentile latency (ns)": 11325992, "99.90 percentile latency (ns)": 11406353, "Max latency (ns)": 11503325, "Mean latency (ns)": 9055954, "Min duration satisfied": "Yes", "Min latency (ns)": 4591783, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 110.37, 
"QPS w/o loadgen overhead": 110.42, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "5GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4 (1x1g.5gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 10.992868, "characteristics.90th_percentile_latency_ns": 10992868.0, "characteristics.90th_percentile_latency_s": 0.010992868, "characteristics.90th_percentile_latency_us": 10992.868, "ck_system": "DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "9b84c5e1acde4d03", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1861709, "90.00 percentile latency (ns)": 
1949327, "90th percentile latency (ns)": 1949327, "95.00 percentile latency (ns)": 2017215, "97.00 percentile latency (ns)": 2107844, "99.00 percentile latency (ns)": 3926934, "99.90 percentile latency (ns)": 4183909, "Max latency (ns)": 9800513, "Mean latency (ns)": 1924593, "Min duration satisfied": "Yes", "Min latency (ns)": 1759140, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 510.9, "QPS w/o loadgen overhead": 519.59, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 1.949327, "characteristics.90th_percentile_latency_ns": 1949327.0, "characteristics.90th_percentile_latency_s": 0.001949327, "characteristics.90th_percentile_latency_us": 1949.327, "ck_system": "A100-PCIex1_TRT_Triton", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT_Triton", "system_name": "Gigabyte G482-Z52 (1x A100-PCIe, TensorRT, Triton)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "4e4527a68eb28416", 
"use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 20407911, "90.00 percentile latency (ns)": 30806756, "90th percentile latency (ns)": 30806756, "95.00 percentile latency (ns)": 31752718, "97.00 percentile latency (ns)": 32290439, "99.00 percentile latency (ns)": 32681369, "99.90 percentile latency (ns)": 33080779, "Max latency (ns)": 33384184, "Mean latency (ns)": 21444718, "Min duration satisfied": "Yes", "Min latency (ns)": 9894693, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 46.58, "QPS w/o loadgen overhead": 46.63, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA AGX Xavier", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 30.806756, "characteristics.90th_percentile_latency_ns": 30806756.0, "characteristics.90th_percentile_latency_s": 0.030806756, "characteristics.90th_percentile_latency_us": 30806.756, "ck_system": "AGX_Xavier_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "20.09 Jetson CUDA-X AI Developer Preview, TensorRT 7.2, CUDA 10.2", "host_memory_capacity": "32GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 8, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/AGX_Xavier_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "20.09 Jetson CUDA-X AI Developer Preview, TensorRT 7.2, CUDA 10.2, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": 
"https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/AGX_Xavier_TRT", "system_name": "NVIDIA Jetson AGX Xavier 32GB (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 32.2581, "task": "NLP", "task2": "nlp", "total_cores": 8, "uid": "740308d2f76247a2", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1770962, "90.00 percentile latency (ns)": 1860460, "90th percentile latency (ns)": 1860460, "95.00 percentile latency (ns)": 1924630, "97.00 percentile latency (ns)": 1991766, "99.00 percentile latency (ns)": 3032179, "99.90 percentile latency (ns)": 3411420, "Max latency (ns)": 7820541, "Mean latency (ns)": 1816198, "Min duration satisfied": "Yes", "Min latency (ns)": 1692665, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 548.5, "QPS w/o loadgen overhead": 550.6, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 1.86046, "characteristics.90th_percentile_latency_ns": 1860460.0, "characteristics.90th_percentile_latency_s": 0.00186046, "characteristics.90th_percentile_latency_us": 1860.46, "ck_system": "DGX-A100_A100-SXM4x1_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x1_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, 
"qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x1_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "5d4c0ff6c4ffd9e3", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 4950825, "90.00 percentile latency (ns)": 6759718, "90th percentile latency (ns)": 6759718, "95.00 percentile latency (ns)": 6881997, "97.00 percentile latency (ns)": 7026058, "99.00 percentile latency (ns)": 7055242, "99.90 percentile latency (ns)": 7096350, "Max latency (ns)": 7931275, "Mean latency (ns)": 5128691, "Min duration satisfied": "Yes", "Min latency (ns)": 4169790, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 194.68, "QPS w/o loadgen overhead": 194.98, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "5GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-SXM4 (1x1g.5gb MIG)", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 6.759718, "characteristics.90th_percentile_latency_ns": 6759718.0, "characteristics.90th_percentile_latency_s": 0.006759718, "characteristics.90th_percentile_latency_us": 6759.718, "ck_system": "DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "1 TB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "15 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT", "number_of_nodes": 1, 
"operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/DGX-A100_A100-SXM4x1-MIG_1x1g.5gb_TRT", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "dca4463f923e8b40", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 6165245, "90.00 percentile latency (ns)": 6782306, "90th percentile latency (ns)": 6782306, "95.00 percentile latency (ns)": 7074628, "97.00 percentile latency (ns)": 7363205, "99.00 percentile latency (ns)": 9390405, "99.90 percentile latency (ns)": 9769979, "Max latency (ns)": 14268514, "Mean latency (ns)": 6273890, "Min duration satisfied": "Yes", "Min latency (ns)": 5380086, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 157.52, "QPS w/o loadgen overhead": 159.39, "Result is": "VALID", "SUT name": "Triton_Server", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 6.782306, "characteristics.90th_percentile_latency_ns": 6782306.0, "characteristics.90th_percentile_latency_s": 0.006782306, "characteristics.90th_percentile_latency_us": 6782.306, "ck_system": "T4x1_TRT_Triton", "ck_used": true, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 120, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7V13 64-Core Processor", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, 
"min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x1_TRT_Triton", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.5 LTS (Linux-5.4.0-1055-azure-x86_64-with-Ubuntu-18.04-bionic)", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0, Triton 20.09; GCC 7.5.0; Python 3.7.10", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "Powered by CK v2.5.8 (https://github.com/ctuning/ck)", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x1_TRT_Triton", "system_name": "Microsoft Corporation 7.0 (Virtual Machine)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 156.25, "task": "NLP", "task2": "nlp", "total_cores": 240, "uid": "0180a6724af085ba", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 5282955, "90.00 percentile latency (ns)": 6459832, "90th percentile latency (ns)": 6459832, "95.00 percentile latency (ns)": 6699812, "97.00 percentile latency (ns)": 6799294, "99.00 percentile latency (ns)": 6952726, "99.90 percentile latency (ns)": 7297947, "Max latency (ns)": 10833442, "Mean latency (ns)": 5373520, "Min duration satisfied": "Yes", "Min latency (ns)": 4242379, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 183.53, "QPS w/o loadgen overhead": 186.1, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 6.459832, "characteristics.90th_percentile_latency_ns": 6459832.0, "characteristics.90th_percentile_latency_s": 0.006459832, "characteristics.90th_percentile_latency_us": 6459.832, "ck_system": "T4x1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": 
"Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/T4x1_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x1_TRT", "system_name": "Supermicro 4029GP-TRT-OTO-28 (1x T4, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 156.25, "task": "NLP", "task2": "nlp", "total_cores": 56, "uid": "84200459b4236b5b", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 1613042, "90.00 percentile latency (ns)": 1759950, "90th percentile latency (ns)": 1759950, "95.00 percentile latency (ns)": 1836768, "97.00 percentile latency (ns)": 1880788, "99.00 percentile latency (ns)": 1899938, "99.90 percentile latency (ns)": 1974436, "Max latency (ns)": 5714402, "Mean latency (ns)": 1643462, "Min duration satisfied": "Yes", "Min latency (ns)": 1513994, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 599.12, "QPS w/o loadgen overhead": 608.47, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "40GB", "accelerator_memory_configuration": "HBM2", "accelerator_model_name": "NVIDIA A100-PCIe", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 1.75995, "characteristics.90th_percentile_latency_ns": 1759950.0, "characteristics.90th_percentile_latency_s": 0.00175995, "characteristics.90th_percentile_latency_us": 1759.95, "ck_system": "A100-PCIex1_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 99.0, "formal_model_link": "", "framework": "TensorRT 7.2, CUDA 11.0 Update 1", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", 
"host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 64, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "AMD EPYC 7742", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/A100-PCIex1_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "TensorRT 7.2, CUDA 11.0 Update 1, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/A100-PCIex1_TRT", "system_name": "Gigabyte G482-Z52 (1x A100-PCIe, TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 588.235, "task": "NLP", "task2": "nlp", "total_cores": 128, "uid": "fce1b4526aa6e84c", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile latency (ns)": 37473625, "90.00 percentile latency (ns)": 48881385, "90th percentile latency (ns)": 48881385, "95.00 percentile latency (ns)": 50197687, "97.00 percentile latency (ns)": 51641164, "99.00 percentile latency (ns)": 52900412, "99.90 percentile latency (ns)": 53128028, "Max latency (ns)": 53411101, "Mean latency (ns)": 37718340, "Min duration satisfied": "Yes", "Min latency (ns)": 14970908, "Min queries satisfied": "Yes", "Mode": "Performance", "QPS w/ loadgen overhead": 26.5, "QPS w/o loadgen overhead": 26.51, "Result is": "VALID", "SUT name": "BERT SERVER", "Scenario": "singlestream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier NX", "accelerator_on-chip_memories": "", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "accuracy_log_sampling_target": 0, "characteristics.90th_percentile_latency_ms": 48.881385, "characteristics.90th_percentile_latency_ns": 48881385.0, "characteristics.90th_percentile_latency_s": 0.048881385, "characteristics.90th_percentile_latency_us": 48881.385, "ck_system": "Xavier_NX_TRT", "ck_used": false, "cooling": "", "dataset": "SQuAD v1.1", "dataset_link": "", "dim_x_default": "seq_number", "dim_y_default": "characteristics.90th_percentile_latency_ms", "dim_y_maximize": false, "division": "closed", "formal_model": "bert", "formal_model_accuracy": 
99.0, "formal_model_link": "", "framework": "20.09 Jetson CUDA-X AI Developer Preview, TensorRT 7.2, CUDA 10.2", "host_memory_capacity": "8GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 6, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32GB", "host_storage_type": "Micro SD Card", "hw_notes": "GPU and both DLAs are used in resnet50, ssd-mobilenet, and ssd-resnet34, in Offline and MultiStream scenarios", "informal_model": "bert-99", "input_data_types": "int32", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 1024, "mlperf_version": 0.7, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.7/tree/master/closed/NVIDIA/results/Xavier_NX_TRT", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.4", "other_software_stack": "20.09 Jetson CUDA-X AI Developer Preview, TensorRT 7.2, CUDA 10.2, cuDNN 8.0.2, DALI 0.25.0", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 10833, "print_timestamps": true, "problem": false, "qsl_rng_seed": 12786827339337101903, "retraining": "N", "sample_index_rng_seed": 12640797754436136668, "samples_per_query": 1, "schedule_rng_seed": 3135815929913719677, "starting_weights_filename": "bert_large_v1_1_fake_quant.onnx", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier_NX_TRT", "system_name": "NVIDIA Jetson Xavier NX (TensorRT)", "system_type": "edge", "target_latency (ns)": 0, "target_qps": 20, "task": "NLP", "task2": "nlp", "total_cores": 6, "uid": "4d1e99f21bc9b05d", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" } ]