# Invocation command line:
# /home/lijunj/SPEC/mpi-2.0.1-run/bin/runspec -c test-mc-prefer.cfg --output_format=all --ranks=128 medium --reportable --flagsurl intel_flags.xml
# output_root was not used for this run
############################################################################
# Invocation command line:
# /home/pshelepu/mpi2007-hsw-ex-m0072/bin/runspec --reportable --config intel_mpi2007-72.cfg --flagsurl EM64T_Intel150_flags.xml --size mref --iterations 3 --output_format=txt --ranks=72 medium
# output_root was not used for this run
############################################################################
#####################################################################
#
# Config file to run SPEC MPI2007 with Intel Software Toolchain
# (Intel Compiler 16.0.3 and Intel MPI 5.1.3)
#
#####################################################################
env_vars = 1
basepeak = 1
reportable = 0
ignore_errors = 0
iterations = 3
makeflags = -j 4
tune = base
size = mref
use_version_url = 1
version_url = version.txt
use_submit_for_speed = 1
output_format = all
FC = mpiifort
CC = mpiicc
CXX = mpiicpc
#####################################################################
# Portability flags
#####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK
127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX
130.socorro=default=default=default:
srcalt=nullify_ptrs
FPORTABILITY = -assume nostd_intent_in
#################################################################
# Optimization flags
#################################################################
default=default=default=default:
OPTIMIZE = -O3 -xMIC-AVX512 -no-prec-div -fp-model fast=2 -ipo -fma
#OPTIMIZE = -O3 -xMIC-AVX512 -fp-model fast=2 -ipo
#submit = mpirun -genv I_MPI_COMPATIBILITY 4 -genv I_MPI_PIN_PROCESSOR_LIST "0-127" -genv I_MPI_PIN_ORDER compact -n $ranks $command
submit = numactl -p 1 mpirun -genv I_MPI_COMPATIBILITY 4 -np $ranks $command
#################################################################
# Notes
#################################################################
test_sponsor = Indiana University
license_num = 3440
tester = Junjie Li
test_date = Sep-2016
hw_avail = Aug-2016
sw_avail = Apr-2016
prepared_by = Indiana University
system_vendor = Colfax International
node_compute_hw_adapter_MISSING_slot_type = 0
node_compute_hw_adapter_MISSING_slot_ports_used = 0
node_compute_hw_adapter_MISSING_ports_used = 0
node_compute_hw_adapter_MISSING_model = 0
node_compute_hw_adapter_MISSING_interconnect = 0
node_compute_hw_adapter_MISSING_driver = 0
node_compute_hw_adapter_MISSING_data_rate = 0
node_compute_hw_adapter_MISSING_count = 0
system_name000 = Intel Xeon Phi 7210, 1.30 GHz,
system_name001 = SMT on, Turbo off, flat (MCDRAM preferred)
#
# Computation node info
#
node_compute_label = KNL
node_compute_order = 1
node_compute_count = 1
node_compute_purpose = head, compute, fileserver
node_compute_hw_vendor = Colfax International
node_compute_hw_model = None
node_compute_hw_cpu_name = Intel Xeon Phi 7210
node_compute_hw_ncpuorder = 1 chip
node_compute_hw_nchips = 1
node_compute_hw_ncores = 64
node_compute_hw_ncoresperchip = 64
node_compute_hw_nthreadspercore = 4
node_compute_hw_cpu_char000 = Intel Turbo Boost Technology off,
node_compute_hw_cpu_char001 = Simultaneous Multithreading (SMT) on
node_compute_hw_cpu_mhz = 1300
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 1 MB I+D on chip per two cores
node_compute_hw_tcache = None
node_compute_hw_ocache = None
node_compute_hw_memory000= 96 GB (6 x 16 GB 2Rx8 PC4-2400T-REB-11, ECC)
node_compute_hw_disk = Intel S3510 SSD 800GB, SATA3
node_compute_hw_other = None
node_compute_sw_os = CentOS Linux Release 7.2.1511
node_compute_sw_localfile = Linux/ext4
node_compute_sw_sharedfile = None
node_compute_sw_state = Multi-User
node_compute_sw_other = None
#
# Fileserver node info
#
#
# IB interconnect
#
#
# Cluster file system interconnect
#
#
# Hardware
#
system_class = Homogeneous
max_ranks = 256
max_peak_ranks = 256
#
# Software
#
sw_c_compiler000= Intel C++ Composer XE 2016 for Linux,
sw_c_compiler001 = Version 16.0.3.210 Build 20160415
sw_cxx_compiler000= Intel C++ Composer XE 2016 for Linux,
sw_cxx_compiler001 = Version 16.0.3.210 Build 20160415
sw_f_compiler000= Intel Fortran Composer XE 2016 for Linux,
sw_f_compiler001 = Version 16.0.3.210 Build 20160415
sw_auto_parallel =
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library = Intel MPI Library for Linux 5.1.3 Build 20160120
sw_mpi_other = None
sw_preprocessors = No
sw_other = None
#
# General notes
#
notes_010 = MPI startup command:
#notes_015 = mpiexec.hydra command was used to start MPI jobs.
notes_015 = mpirun command was used to start MPI jobs.
notes_020 =
notes_025 = BIOS settings:
notes_030 = Intel Simultaneous Multithreading (SMT): on
notes_035 = Intel Turbo Boost Technology (Turbo) : off
notes_040 = Cluster Mode: quadrant
notes_045 = Memory Mode: flat
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel_flags.xml
flagsurl001 = http://www.spec.org/mpi2007/flags/colfax-knl.xml
notes_000 =130.socorro (base): "nullify_ptrs" src.alt was used.
notes_005 =
notes_submit_000 =numactl -p 1 mpirun -genv I_MPI_COMPATIBILITY 4 -np $ranks $command