[ { "50.00 percentile": 1, "50.00 percentile latency (ns)": 26339472, "90.00 percentile": 1, "90.00 percentile latency (ns)": 43077606, "95.00 percentile": 1, "95.00 percentile latency (ns)": 45774913, "97.00 percentile": 1, "97.00 percentile latency (ns)": 46206702, "99.00 percentile": 1, "99.00 percentile latency (ns)": 46632430, "99.90 percentile": 1, "99.90 percentile latency (ns)": 47043952, "Max latency (ns)": 87508376, "Mean latency (ns)": 26314523, "Min duration satisfied": "Yes", "Min latency (ns)": 5586854, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 3640, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA TITAN RTX", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 22.911, "characteristics.samples_per_query": 3640, "characteristics.samples_per_query.normalized_per_core": 910.0, "characteristics.samples_per_query.normalized_per_processor": 910.0, "ck_system": "TitanRTXx4", "ck_used": false, "cooling": "watercooled", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": "https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "TensorRT 6.0, CUDA 10.1, cuDNN 7.6.3, libjemalloc2, cub 1.8.0, tensorrt-laboratory mlperf branch", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 24, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) 8268", "host_processors_per_node": 2, "host_storage_capacity": "3.84 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "ssd-small", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/TitanRTXx4", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "docker 18.09.2, python 3.6.8,gcc 5.5.0,onnx 1.3.0, tensorflow 1.13.1, pytorch 1.1.0, torchvision 0.3.0, pycuda 2019.1, sacrebleu 1.3.3, SimpleJSON, OpenCV 4.1.1", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 256, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 3640, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "frozen_inference_graph.pb", "status": 
"available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/TitanRTXx4", "system_name": "SCAN 3XS DBP T496X2 Fluid", "target_latency (ns)": 50000000, "target_qps": 20, "task": "object detection", "task2": "object detection", "total_cores": 48, "uid": "4045bffb644aa112", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile": 1, "50.00 percentile latency (ns)": 60960814, "90.00 percentile": 1, "90.00 percentile latency (ns)": 62021689, "95.00 percentile": 1, "95.00 percentile latency (ns)": 62203371, "97.00 percentile": 1, "97.00 percentile latency (ns)": 62292503, "99.00 percentile": 1, "99.00 percentile latency (ns)": 62465284, "99.90 percentile": 1, "99.90 percentile latency (ns)": 62673662, "Max latency (ns)": 111500633, "Mean latency (ns)": 60892826, "Min duration satisfied": "Yes", "Min latency (ns)": 58091878, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 88, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "24 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA TITAN RTX", "accelerator_on-chip_memories": "", "accelerators_per_node": 4, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 20.067, "characteristics.samples_per_query": 88, "characteristics.samples_per_query.normalized_per_core": 22.0, "characteristics.samples_per_query.normalized_per_processor": 22.0, "ck_system": "TitanRTXx4", "ck_used": false, "cooling": "watercooled", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": "https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "TensorRT 6.0, CUDA 10.1, cuDNN 7.6.3, libjemalloc2, cub 1.8.0, tensorrt-laboratory mlperf branch", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 24, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) 8268", "host_processors_per_node": 2, "host_storage_capacity": "3.84 TB", "host_storage_type": "NVMe SSD", "hw_notes": "", "informal_model": "ssd-large", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 4, "normalize_processors": 4, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/TitanRTXx4", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "docker 
18.09.2, python 3.6.8,gcc 5.5.0,onnx 1.3.0, tensorflow 1.13.1, pytorch 1.1.0, torchvision 0.3.0, pycuda 2019.1, sacrebleu 1.3.3, SimpleJSON, OpenCV 4.1.1", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 64, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 88, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "resnet34-ssd1200.pytorch", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/TitanRTXx4", "system_name": "SCAN 3XS DBP T496X2 Fluid", "target_latency (ns)": 66666666, "target_qps": 15, "task": "object detection", "task2": "object detection", "total_cores": 48, "uid": "8624bb4a63d00ad2", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile": 1, "50.00 percentile latency (ns)": 44804798, "90.00 percentile": 1, "90.00 percentile latency (ns)": 44942671, "95.00 percentile": 1, "95.00 percentile latency (ns)": 44985022, "97.00 percentile": 1, "97.00 percentile latency (ns)": 45018946, "99.00 percentile": 1, "99.00 percentile latency (ns)": 45119072, "99.90 percentile": 1, "99.90 percentile latency (ns)": 45587805, "Max latency (ns)": 55446906, "Mean latency (ns)": 43945734, "Min duration satisfied": "Yes", "Min latency (ns)": 36801730, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 102, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier", "accelerator_on-chip_memories": "1MB (128KB/SM) L1 + 512KB L2 + 4MB (DLA)", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 22.936, "characteristics.samples_per_query": 102, "characteristics.samples_per_query.normalized_per_core": 102.0, "characteristics.samples_per_query.normalized_per_processor": 102.0, "ck_system": "Xavier", "ck_used": false, "cooling": "", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": "https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "JetPack 4.3 DP, TensorRT 6.0, cuDNN 7.6.3, CUDA 10.0, cub 1.8.0", "host_memory_capacity": "16 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "8MB L2 (2MB per dual cluster)/4 MB L3 (shared)", "host_processor_core_count": 8, "host_processor_frequency": "2265.5 MHz", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 
5.1", "hw_notes": "GPU and both DLAs are used in Offline and MultiStream scenarios", "informal_model": "ssd-small", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/Xavier", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "pycuda 2019.1, pytorch 1.1, torchvision 0.2.2.post3", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 256, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 102, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "frozen_inference_graph.pb", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier", "system_name": "NVIDIA Jetson AGX Xavier", "target_latency (ns)": 50000000, "target_qps": 20, "task": "object detection", "task2": "object detection", "total_cores": 8, "uid": "1eb6bd0248cf8734", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile": 1, "50.00 percentile latency (ns)": 57230193, "90.00 percentile": 1, "90.00 percentile latency (ns)": 57325083, "95.00 percentile": 1, "95.00 percentile latency (ns)": 57384842, "97.00 percentile": 1, "97.00 percentile latency (ns)": 57430198, "99.00 percentile": 1, "99.00 percentile latency (ns)": 57478177, "99.90 percentile": 1, "99.90 percentile latency (ns)": 57535617, "Max latency (ns)": 58628874, "Mean latency (ns)": 57243922, "Min duration satisfied": "Yes", "Min latency (ns)": 57063179, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 2, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "Shared with host", "accelerator_memory_configuration": "SRAM", "accelerator_model_name": "NVIDIA Xavier", "accelerator_on-chip_memories": "1MB (128KB/SM) L1 + 512KB L2 + 4MB (DLA)", "accelerators_per_node": 1, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 20.067, "characteristics.samples_per_query": 2, "characteristics.samples_per_query.normalized_per_core": 2.0, "characteristics.samples_per_query.normalized_per_processor": 2.0, "ck_system": "Xavier", "ck_used": false, "cooling": "", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": 
"https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "JetPack 4.3 DP, TensorRT 6.0, cuDNN 7.6.3, CUDA 10.0, cub 1.8.0", "host_memory_capacity": "16 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "8MB L2 (2MB per dual cluster)/4 MB L3 (shared)", "host_processor_core_count": 8, "host_processor_frequency": "2265.5 MHz", "host_processor_interconnect": "", "host_processor_model_name": "NVIDIA Carmel (ARMv8.2)", "host_processors_per_node": 1, "host_storage_capacity": "32 GB", "host_storage_type": "eMMC 5.1", "hw_notes": "GPU and both DLAs are used in Offline and MultiStream scenarios", "informal_model": "ssd-large", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 1, "normalize_processors": 1, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/Xavier", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "pycuda 2019.1, pytorch 1.1, torchvision 0.2.2.post3", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 64, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 2, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "resnet34-ssd1200.pytorch", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/Xavier", "system_name": "NVIDIA Jetson AGX Xavier", "target_latency (ns)": 66666666, "target_qps": 15, "task": "object detection", "task2": "object detection", "total_cores": 8, "uid": "b790ca88d41a3a57", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile": 1, "50.00 percentile latency (ns)": 25799516, "90.00 percentile": 1, "90.00 percentile latency (ns)": 42096960, "95.00 percentile": 1, "95.00 percentile latency (ns)": 43445138, "97.00 percentile": 1, "97.00 percentile latency (ns)": 44041334, "99.00 percentile": 1, "99.00 percentile latency (ns)": 44941130, "99.90 percentile": 1, "99.90 percentile latency (ns)": 46061622, "Max latency (ns)": 55074627, "Mean latency (ns)": 25616074, "Min duration satisfied": "Yes", "Min latency (ns)": 6244404, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 2624, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA Tesla T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 22.912, "characteristics.samples_per_query": 2624, 
"characteristics.samples_per_query.normalized_per_core": 328.0, "characteristics.samples_per_query.normalized_per_processor": 328.0, "ck_system": "T4x8", "ck_used": false, "cooling": "", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": "https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "TensorRT 6.0, CUDA 10.1, cuDNN 7.6.3, libjemalloc2, cub 1.8.0, tensorrt-laboratory mlperf branch", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "ssd-small", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/T4x8", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "docker 18.09.2, python 3.6.8,gcc 5.5.0,onnx 1.3.0, tensorflow 1.13.1, pytorch 1.1.0, torchvision 0.3.0, pycuda 2019.1, sacrebleu 1.3.3, SimpleJSON, OpenCV 4.1.1", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 256, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 2624, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "frozen_inference_graph.pb", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8", "system_name": "Supermicro 4029GP-TRT-OTO-28 8xT4", "target_latency (ns)": 50000000, "target_qps": 20, "task": "object detection", "task2": "object detection", "total_cores": 56, "uid": "94454c8de731d583", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" }, { "50.00 percentile": 1, "50.00 percentile latency (ns)": 30744654, "90.00 percentile": 1, "90.00 percentile latency (ns)": 53501354, "95.00 percentile": 1, "95.00 percentile latency (ns)": 54484516, "97.00 percentile": 1, "97.00 percentile latency (ns)": 54991364, "99.00 percentile": 1, "99.00 percentile latency (ns)": 55871884, "99.90 percentile": 1, "99.90 percentile latency (ns)": 56968755, "Max latency (ns)": 58431432, "Mean latency (ns)": 31168022, "Min duration satisfied": "Yes", "Min latency (ns)": 7058736, "Min queries satisfied": "Yes", "Mode": "Performance", "Per-sample latency": "", "Performance constraints 
satisfied": "Yes", "Result is": "VALID", "SUT name": "LWIS_Server", "Samples per query": 56, "Scenario": "multistream", "accelerator_frequency": "", "accelerator_host_interconnect": "", "accelerator_interconnect": "", "accelerator_interconnect_topology": "", "accelerator_memory_capacity": "16 GB", "accelerator_memory_configuration": "GDDR6", "accelerator_model_name": "NVIDIA Tesla T4", "accelerator_on-chip_memories": "", "accelerators_per_node": 8, "accuracy_log_probability": 0, "accuracy_log_rng_seed": 0, "characteristics.mAP": 20.067, "characteristics.samples_per_query": 56, "characteristics.samples_per_query.normalized_per_core": 7.0, "characteristics.samples_per_query.normalized_per_processor": 7.0, "ck_system": "T4x8", "ck_used": false, "cooling": "", "dataset": "COCO 2017 (300x300)", "dataset_link": "https://github.com/ctuning/ck/blob/master/docs/mlperf-automation/datasets/coco2017.md", "dim_x_default": "characteristics.samples_per_query", "dim_y_default": "characteristics.mAP", "dim_y_maximize": true, "division": "closed", "formal_model": "ssd-mobilenet", "formal_model_accuracy": 99.0, "formal_model_link": "https://github.com/mlcommons/ck-mlops/tree/main/package", "framework": "TensorRT 6.0, CUDA 10.1, cuDNN 7.6.3, libjemalloc2, cub 1.8.0, tensorrt-laboratory mlperf branch", "host_memory_capacity": "768 GB", "host_memory_configuration": "", "host_networking": "", "host_networking_topology": "", "host_processor_caches": "", "host_processor_core_count": 28, "host_processor_frequency": "", "host_processor_interconnect": "", "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz", "host_processors_per_node": 2, "host_storage_capacity": "4 TB", "host_storage_type": "NVMe SSD", "hw_notes": "ECC off", "informal_model": "ssd-large", "input_data_types": "int8", "key.accuracy": "characteristics.mAP", "max_async_queries": 1, "max_duration (ms)": 0, "max_query_count": 0, "min_duration (ms)": 60000, "min_query_count": 270336, "mlperf_version": 0.5, "normalize_cores": 8, "normalize_processors": 8, "note_code": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/code", "note_details": "https://github.com/mlcommons/inference_results_v0.5/tree/master/closed/NVIDIA/results/T4x8", "number_of_nodes": 1, "operating_system": "Ubuntu 18.04.3", "other_software_stack": "docker 18.09.2, python 3.6.8,gcc 5.5.0,onnx 1.3.0, tensorflow 1.13.1, pytorch 1.1.0, torchvision 0.3.0, pycuda 2019.1, sacrebleu 1.3.3, SimpleJSON, OpenCV 4.1.1", "performance_issue_same": true, "performance_issue_same_index": 0, "performance_issue_unique": true, "performance_sample_count": 64, "print_timestamps": true, "problem": false, "qsl_rng_seed": 3133965575612453542, "retraining": "N", "sample_index_rng_seed": 665484352860916858, "samples_per_query": 56, "schedule_rng_seed": 3622009729038561421, "starting_weights_filename": "resnet34-ssd1200.pytorch", "status": "available", "submitter": "NVIDIA", "submitter_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.submitter/NVIDIA", "sw_notes": "", "system_link": "https://github.com/ctuning/ck-mlperf-inference/tree/main/bench.mlperf.system/T4x8", "system_name": "Supermicro 4029GP-TRT-OTO-28 8xT4", "target_latency (ns)": 66666666, "target_qps": 15, "task": "object detection", "task2": "object detection", "total_cores": 56, "uid": "185b516beb291682", "use_accelerator": true, "weight_data_types": "int8", "weight_transformations": "quantization, affine fusion" } ]