# Invocation command line: # /lfs3/pshelepu/mpi2007/128/bin/runspec --reportable --config intel_mpi2007.cfg --flagsurl EM64T_Intel111_flags.xml --size mref --iterations 2 --output_format=txt --ranks=128 all # output_root was not used for this run ############################################################################ ##################################################################### # # Config file to run SPEC MPI2007 with Intel Software Toolchain # (Intel Compiler 11.1 and Intel MPI 3.2) # ##################################################################### env_vars = 1 basepeak = 1 reportable = 1 ignore_errors = 0 iterations = 3 makeflags = -j 4 tune = base size = mref use_version_url = 1 version_url = current_version use_submit_for_speed = 1 output_format = all FC = mpiifort CC = mpiicc CXX = mpiicpc ##################################################################### # Portability flags ##################################################################### 121.pop2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG 126.lammps=default=default=default: CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK 127.wrf2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX ################################################################# # Optimization flags ################################################################# default=default=default=default: OPTIMIZE = -O3 -xSSE4.2 -no-prec-div submit = mpiexec -genv I_MPI_DEVICE rdssm -genv I_MPI_FALLBACK_DEVICE disable -perhost 8 -n $ranks $command ################################################################# # Notes ################################################################# test_sponsor = Intel Corporation license_num = 13 tester = Pavel Shelepugin test_date = Jun-2009 hw_avail = Jun-2009 sw_avail = Jun-2009 prepared_by = Intel Corporation system_vendor = Intel Corporation system_name000 = Endeavor (Intel Xeon X5560, 2.80 GHz, system_name001 = DDR3-1333 MHz, SMT off, Turbo off) 
# # Computation node info # node_compute_label = Endeavor Node node_compute_order = 1 node_compute_count = 16 node_compute_purpose = compute node_compute_hw_vendor = Intel node_compute_hw_model = SR1600UR node_compute_hw_cpu_name = Intel Xeon X5560 node_compute_hw_ncpuorder = 1-2 chips node_compute_hw_nchips = 2 node_compute_hw_ncores = 8 node_compute_hw_ncoresperchip = 4 node_compute_hw_nthreadspercore = 1 node_compute_hw_cpu_char000= Intel Turbo Boost Technology disabled, node_compute_hw_cpu_char001 = 6.4 GT/s QPI, Hyper-Threading disabled node_compute_hw_cpu_mhz = 2800 node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core node_compute_hw_scache = 256 KB I+D on chip per core node_compute_hw_tcache = 8 MB I+D on chip per chip, 8 MB shared / 4 cores node_compute_hw_ocache = None node_compute_hw_memory = 24 GB (RDIMM 6x4-GB DDR3-1333 MHz) node_compute_hw_disk = Seagate 400 GB ST3400755SS node_compute_hw_other = None node_compute_hw_adapter_ib_model = Mellanox MHQH29-XTC node_compute_hw_adapter_ib_count = 1 node_compute_hw_adapter_ib_slot_type = PCIe x8 Gen2 node_compute_hw_adapter_ib_data_rate = InfiniBand 4x QDR node_compute_hw_adapter_ib_ports_used = 1 node_compute_hw_adapter_ib_interconnect = InfiniBand node_compute_hw_adapter_ib_driver = OFED 1.3.1 node_compute_hw_adapter_ib_firmware = 2.6.000 node_compute_sw_os = Red Hat EL 5.2, kernel 2.6.18-128 node_compute_sw_localfile = Linux/ext2 node_compute_sw_sharedfile = Lustre FS node_compute_sw_state = Multi-User node_compute_sw_other = PBS Pro 8.0 # # Fileserver node info # node_fileserver_label = LFS node_fileserver_order = 2 node_fileserver_count = 8 node_fileserver_purpose = fileserver node_fileserver_hw_vendor = Intel node_fileserver_hw_model = SR1560SF node_fileserver_hw_cpu_name = Intel Xeon E5462 node_fileserver_hw_ncpuorder = 1-2 chips node_fileserver_hw_nchips = 2 node_fileserver_hw_ncores = 8 node_fileserver_hw_ncoresperchip = 4 node_fileserver_hw_nthreadspercore = 1 node_fileserver_hw_cpu_char = 
1600 MHz FSB node_fileserver_hw_cpu_mhz = 2800 node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core node_fileserver_hw_scache = 12 MB I+D on chip per chip, 6 MB shared / 2 cores node_fileserver_hw_tcache = None node_fileserver_hw_ocache = None node_fileserver_hw_memory = 16 GB DDR2 16x1-GB 667 MHz node_fileserver_hw_disk = Seagate 250 GB node_fileserver_hw_other = connected to DDN storage (see General Notes) node_fileserver_hw_adapter_fs_model = Mellanox MHGH28-XTC node_fileserver_hw_adapter_fs_count = 1 node_fileserver_hw_adapter_fs_slot_type = PCIe x8 Gen2 node_fileserver_hw_adapter_fs_data_rate = InfiniBand 4x DDR node_fileserver_hw_adapter_fs_ports_used = 1 node_fileserver_hw_adapter_fs_interconnect = InfiniBand node_fileserver_hw_adapter_fs_driver = OFED 1.3.1 node_fileserver_hw_adapter_fs_firmware = 2.6.000 node_fileserver_sw_os = Red Hat EL 5.2, kernel 2.6.18-53 node_fileserver_sw_localfile = None node_fileserver_sw_sharedfile = Lustre FS node_fileserver_sw_state = Multi-User node_fileserver_sw_other = None # # IB interconnect # interconnect_ib_label = IB Switch interconnect_ib_order = 1 interconnect_ib_purpose = MPI traffic, FS traffic interconnect_ib_hw_vendor = Mellanox interconnect_ib_hw_model = Mellanox MTS3600Q-1UNC interconnect_ib_hw_switch_3600_model = Mellanox MTS3600Q-1UNC interconnect_ib_hw_switch_3600_count = 46 interconnect_ib_hw_switch_3600_ports = 36 interconnect_ib_hw_topo = Fat tree interconnect_ib_hw_switch_3600_data_rate = InfiniBand 4x QDR interconnect_ib_hw_switch_3600_firmware = 7.1.000 # # Hardware # system_class = Homogeneous max_ranks = 128 max_peak_ranks = 128 # # Software # sw_c_compiler = Intel C++ Compiler 11.1 for Linux sw_cxx_compiler = Intel C++ Compiler 11.1 for Linux sw_f_compiler = Intel Fortran Compiler 11.1 for Linux sw_base_ptrsize = 64-bit sw_peak_ptrsize = 64-bit sw_mpi_library = Intel MPI Library 3.2 for Linux sw_mpi_other = None sw_preprocessors = No sw_other = None # # General notes # notes_000 = MPI 
startup command: notes_005 = mpiexec command was used to start MPI jobs. This command uses notes_010 = an independent ring of mpd daemons, which is started beforehand via notes_015 = mpdboot command. mpdboot was launched only once, and the corresponding notes_020 = ring of daemons was used for every iteration of each SPEC MPI component. notes_025 = So, the startup and tear-down time of the daemons was not included in notes_030 = the elapsed time and thus was not taken into account during calculation notes_035 = of the ratio. notes_040 = notes_045 = BIOS settings: notes_050 = Intel Hyper-Threading Technology (SMT): Disabled (default is Enabled) notes_055 = Intel Turbo Boost Technology (Turbo) : Disabled (default is Enabled) notes_060 = notes_065 = RAM configuration: notes_070 = Compute nodes have 1x4-GB RDIMM on each memory channel. notes_075 = notes_080 = Network: notes_085 = Forty-six 36-port switches: 18 core switches and 28 leaf switches. notes_090 = Each leaf has one link to each core. Remaining 18 ports on 25 of 28 leaves notes_095 = are used for compute nodes. On the remaining 3 leaves the ports are used notes_100 = for FS nodes and other peripherals. notes_105 = notes_110 = Job placement: notes_115 = Each MPI job was assigned to a topologically compact set of nodes, i.e. notes_120 = the minimal needed number of leaf switches was used for each job: 1 switch notes_125 = for 16/32/64/128 ranks, 2 switches for 256 ranks, 4 switches for 512 ranks. notes_130 = notes_135 = Fileserver: notes_140 = Intel SR1560SF systems connected via IB to DataDirect Networks S2A9900 notes_145 = storage which is: 160 disks, 300GB/disk, 48TB total, 35TB available. notes_150 = notes_155 = PBS Pro was used for job submission. It has no impact on performance. notes_160 = Can be found at: http://www.altair.com notes_165 = notes_170 = Lustre File System 1.6.6 was used. 
Download from: notes_175 = http://www.sun.com/software/products/lustre # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel111_flags.xml