# Invocation command line: # /home/root/NFS/bin/runspec --config=lenovoM_2node.cfg --flagsurl=/home/root/NFS/Lenovo_Specmpim_Flags.xml --size=mref --tune=base --reportable --rank=112 --iterations=3 medium # output_root was not used for this run ############################################################################ ##################################################################### # # Config file to run SPEC MPI2007 with Intel Software Toolchain # (Intel Compiler 17.0.2 and Intel MPI 17.1.132) # ##################################################################### env_vars=1 basepeak=1 reportable=1 ignore_errors=0 iterations=3 makeflags= -j 8 tune=base size=mref use_version_url=1 use_submit_for_speed =1 output_format= all FC= mpiifort CC= mpiicc CXX= mpiicpc #include: cyborg2Node.inc # ----- Begin inclusion of 'cyborg2Node.inc' ############################################################################ ################################################################# # Notes ################################################################# test_sponsor = Lenovo Global Technology license_num = 28 tester = Lenovo Global Technology test_date = Jun-2017 hw_avail = Aug-2017 sw_avail = Aug-2017 prepared_by = Lenovo Global Technology system_vendor = Lenovo Global Technology system_name000 = ThinkSystem SR650 system_name001 = (Intel Xeon Platinum 8180 CPU, 2.50 GHz) # # Computation node info [Node_Description: Hardware] # node_compute_label = ThinkSystem SR650 node_compute_order = 1 node_compute_count = 2 node_compute_purpose = compute node_compute_hw_vendor = Lenovo Global Technology node_compute_hw_model = SR650 node_compute_hw_cpu_name = Intel Xeon Platinum 8180 node_compute_hw_ncpuorder = 1-2 chips node_compute_hw_nchips = 2 node_compute_hw_ncores = 56 node_compute_hw_ncoresperchip = 28 node_compute_hw_nthreadspercore = 1 node_compute_hw_cpu_char = None node_compute_hw_cpu_mhz = 2500 node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core 
node_compute_hw_scache = 1 MB I+D on chip per core node_compute_hw_tcache000 = 38.5 MB I+D on chip per chip node_compute_hw_tcache001 = shared / 28 cores node_compute_hw_ocache = None node_compute_hw_memory = 768 GB (24 x 32 GB 2Rx4 PC4-2666V) node_compute_hw_disk = 800 GB 12 Gbps SAS 2.5" SSD (JBOD) node_compute_hw_other = ThinkSystem RAID 930-8i 2GB Flash # #[Node_Description: Software] # node_compute_hw_adapter_fs_model = Intel Omni-Path Fabric Adapter 100 Series node_compute_hw_adapter_fs_count = 1 node_compute_hw_adapter_fs_slot_type = PCI-Express x16 node_compute_hw_adapter_fs_data_rate = 100 Gb/s node_compute_hw_adapter_fs_ports_used = 1 node_compute_hw_adapter_fs_interconnect = Direct Attached Cable (DAC) node_compute_hw_adapter_fs_driver = IFS 10.4.2.0.7 node_compute_hw_adapter_fs_firmware = 10.4.0.0.146 node_compute_sw_os000 = Red Hat Enterprise Linux Server release 7.3, node_compute_sw_os001 = Kernel 3.10.0-514.el7.x86_64 node_compute_sw_localfile = xfs node_compute_sw_sharedfile = nfs node_compute_sw_state = Multi-user node_compute_sw_other = NONE # # Fileserver node info # #node_fileserver_label = NFS #node_fileserver_order = 2 #node_fileserver_count = 1 #node_fileserver_purpose = fileserver #node_fileserver_hw_vendor = Intel #node_fileserver_hw_model = S7000FC4UR #node_fileserver_hw_cpu_name = Intel Xeon CPU #node_fileserver_hw_ncpuorder = 1-4 chips #node_fileserver_hw_nchips = 4 #node_fileserver_hw_ncores = 16 #node_fileserver_hw_ncoresperchip = 4 #node_fileserver_hw_nthreadspercore = 2 #node_fileserver_hw_cpu_char = -- #node_fileserver_hw_cpu_mhz = 2926 #node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core #node_fileserver_hw_scache = 8 MB I+D on chip per chip, 4 MB shared / 2 cores #node_fileserver_hw_tcache = None #node_fileserver_hw_ocache = None #node_fileserver_hw_memory = 64 GB #node_fileserver_hw_disk = 8 disks, 500GB/disk, 2.7TB total #node_fileserver_hw_other = None #node_fileserver_hw_adapter_fs_model000 = Intel 82563GB Dual-Port 
Gigabit #node_fileserver_hw_adapter_fs_model001 = Ethernet Controller #node_fileserver_hw_adapter_fs_count = 1 #node_fileserver_hw_adapter_fs_slot_type = PCI-Express x8 #node_fileserver_hw_adapter_fs_data_rate = 1Gbps Ethernet #node_fileserver_hw_adapter_fs_ports_used = 1 #node_fileserver_hw_adapter_fs_interconnect = Ethernet #node_fileserver_hw_adapter_fs_driver = e1000e #node_fileserver_hw_adapter_fs_firmware = N/A #node_fileserver_sw_os = RedHat 7 Update 3 #node_fileserver_sw_localfile = None #node_fileserver_sw_sharedfile = NFS #node_fileserver_sw_state = Multi-User #node_fileserver_sw_other = None # # IB interconnect Mellanox # #interconnect_ib_label = IB Switch #interconnect_ib_order = 1 #interconnect_ib_purpose = MPI traffic #interconnect_ib_hw_vendor = Mellanox #interconnect_ib_hw_model = Mellanox MSX6025F-1BFR #interconnect_ib_hw_switch_3600_model = Mellanox MSX6025F-1BFR #interconnect_ib_hw_switch_3600_count = 46 #interconnect_ib_hw_switch_3600_ports = 36 #interconnect_ib_hw_topo = Fat tree #interconnect_ib_hw_switch_3600_data_rate = InfiniBand 4x FDR #interconnect_ib_hw_switch_3600_firmware = 9.2.8000 # # Cluster file system interconnect # interconnect_fs_label = Intel Omni-Path interconnect_fs_order = 0 interconnect_fs_purpose = MPI Traffic interconnect_fs_hw_vendor = Intel interconnect_fs_hw_model = Intel Omni-Path Fabric Adapter 100 Series interconnect_fs_hw_switch_fs_model = N/A interconnect_fs_hw_switch_fs_count = 0 interconnect_fs_hw_switch_fs_ports = 0 interconnect_fs_hw_topo = Direct Connect interconnect_fs_hw_switch_fs_data_rate = N/A interconnect_fs_hw_switch_fs_firmware = N/A # # Hardware # system_class = Homogeneous max_ranks = 112 max_peak_ranks = N/A # # Software # sw_c_compiler000 = Intel C++ Compiler 17.0 Update 4 for Linux sw_c_compiler001 = Version 17.0.4 Build 20170411 sw_cxx_compiler000 = Intel C++ Compiler 17.0 Update 4 for Linux sw_cxx_compiler001 = Version 17.0.4 Build 20170411 sw_f_compiler = Intel Fortran Compiler 17.0 Update 
4 for Linux sw_f_compiler001 = Version 17.0.4 Build 20170411 sw_auto_parallel = sw_base_ptrsize = 64-bit sw_peak_ptrsize = Not Applicable sw_mpi_library000 = Intel MPI Library for Linux* OS sw_mpi_library001 = Version 2017 Update 3 Build 20170405 sw_mpi_other = None sw_preprocessors = No sw_other = None # # General notes # notes_000 = MPI startup command: notes_005 = mpiexec command was used to start MPI jobs. notes_010 = notes_015 = RAM configuration: notes_020 = Compute nodes have 2x32GB RDIMM on each memory channel. notes_025 = notes_030 = Add "intel_idle.max_cstate=0 intel_pstate=disable" into grub notes_035 = notes_040 = BIOS settings: notes_045 = Operating Mode : Maximum Performance Mode notes_050 = Intel Hyper-Threading Technology (SMT): Disabled notes_055 = SNC (Sub-NUMA Cluster) : Enable notes_060 = # ---- End inclusion of '/home/root/NFS/config/cyborg2Node.inc' ##################################################################### # Portability flags ##################################################################### 121.pop2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG 126.lammps=default=default=default: CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK 127.wrf2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX 129.tera_tf=default=default=default: #srcalt=add_rank_support 130.socorro=default=default=default: #srcalt=nullify_ptrs FPORTABILITY = -assume nostd_intent_in 143.dleslie=default=default=default: #srcalt=integer_overflow ################################################################# # Optimization flags ################################################################# default=default=default=default: OPTIMIZE = -O3 -ipo -xCORE-AVX512 -no-prec-div #submit = mpiexec.hydra -hosts 192.168.99.1 -genv I_MPI_PROVIDER rdma -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command -genv I_MPI_PIN_DOMAIN=numa #submit = mpiexec.hydra -hosts 192.168.99.2 -genv 
I_MPI_PROVIDER psm2 -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command -genv I_MPI_PIN_DOMAIN=node #OK submit = mpiexec -hosts 192.168.99.1,192.168.99.3 -genv I_MPI_PROVIDER psm2 -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command #submit = mpiexec -hosts 192.168.99.1,192.168.99.3 -genv I_MPI_PROVIDER PSM2 -genv I_MPI_FABRICS SHM:TMI -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command submit = mpiexec -hosts 192.168.99.1,192.168.99.3 -genv I_MPI_PROVIDER PSM2 -genv I_MPI_FABRICS SHM:TMI -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command #submit = mpiexec.hydra -hosts 192.168.99.1,192.168.99.2 -genv I_MPI_PROVIDER psm2 -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -genv I_MPI_DEBUG=5 -genv I_MPI_PIN_DOMAIN=CORE -n $ranks $command # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: flagsurl000 = http://www.spec.org/mpi2007/flags/Lenovo-SPECmpiM_Platform_Flags.xml flagsurl001 = http://www.spec.org/mpi2007/flags/EM64T_Intel17_flags.xml