# Invocation command line:
# /home/HPC2021F1.0.1/bin/harness/runhpc --reportable --config=Pro8S_I204_IMPI204.cfg --tune=base pmodel=OMP --define model=omp --threads=7 --rank=32 --size=ref --iterations=3 tiny
# output_root was not used for this run
############################################################################

build_in_build_dir = 0        # build in the run directory
strict_rundir_verify = 1

%ifndef %{label}              # If label is not set, use the default below
%   define label Pro8S_I204_IMPI204
%endif

%ifndef %{model}              # If model is not set, default to MPI
%   define model mpi
%endif
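# For example, the label and model defaults above are overridden at
# invocation time with --define; the reportable run recorded at the top of
# this file selected the OpenMP build that way:
#
#   runhpc --reportable --config=Pro8S_I204_IMPI204.cfg --tune=base \
#       pmodel=OMP --define model=omp --threads=7 --rank=32 --size=ref \
#       --iterations=3 tiny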
teeout = yes
makeflags = -j 40
flagsurl000 = http://www.spec.org/hpc2021/flags/Intel_compiler_flags.xml

######################################################################
# The header section of the config file. Must appear
# before any instances of "section markers" (see below).
#
# label = how the binaries you generate will be identified
# tune  = specify "base", "peak", or "all"
label = %{label}_%{model}
tune = base
output_format = text
use_submit_for_speed = 1

default:
AR = ar
ARFLAGS = cr
CC = mpiicc
CXX = mpiicpc
FC = mpiifort

sw_compiler000 = Intel Parallel Studio 2020 Update 4
sw_compiler001 = Version 19.1.3.304 Build 20200925
test_sponsor = Lenovo Global Technology
license_num = 28
tester = Lenovo Global Technology
hw_avail = Apr-2019
sw_avail = Sep-2020
prepared_by = Lenovo Global Technology
system_vendor = Lenovo Global Technology
system_name = ThinkSystem SR950 (Intel Xeon Platinum 8280L)

node_fileserver_syslbl = ThinkSystem SR950
node_fileserver_sw_state = Multi-User, run level 3
node_fileserver_sw_sharedfile = N/A
node_fileserver_sw_other = None
node_fileserver_sw_os = Red Hat Enterprise Linux Server release 7.6
node_fileserver_sw_localfile = xfs
node_fileserver_sw_accel_driver = N/A
node_fileserver_purpose = Fileserver
node_fileserver_order = 1
node_fileserver_hw_vendor = Lenovo Global Technology
node_fileserver_hw_tcache = 39424 KB I+D on chip per chip
node_fileserver_hw_scache = 1 MB I+D on chip per core
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_other = None
node_fileserver_hw_ocache = None
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_ncpuorder = 2,3,4,6,8 chips
node_fileserver_hw_ncoresperchip = 28
node_fileserver_hw_ncores = 224
node_fileserver_hw_nchips = 8
node_fileserver_hw_model = ThinkSystem SR950
node_fileserver_hw_memory = 1536 GB (96 x 16 GB 2Rx8 PC4-2933Y-R)
node_fileserver_hw_disk = 1 x 960 GB NVMe 2.5" SSD
node_fileserver_hw_cpu_name = Intel Xeon Platinum 8280L
node_fileserver_hw_cpu_mhz = 2700
node_fileserver_hw_cpu_char = Intel Turbo Boost Technology up to 4.0 GHz
node_fileserver_hw_adapter_fs_slot_type = N/A
node_fileserver_hw_adapter_fs_ports_used = 0
node_fileserver_hw_adapter_fs_model = N/A
node_fileserver_hw_adapter_fs_interconnect = N/A
node_fileserver_hw_adapter_fs_firmware = N/A
node_fileserver_hw_adapter_fs_driver = N/A
node_fileserver_hw_adapter_fs_data_rate = N/A
node_fileserver_hw_adapter_fs_count = 0
node_fileserver_count = 1

node_compute_syslbl = ThinkSystem SR950
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_sharedfile = None
node_compute_sw_other = None
node_compute_sw_localfile = xfs
node_compute_purpose = compute
node_compute_order = 1
node_compute_hw_vendor = Lenovo Global Technology
node_compute_hw_tcache = 39424 KB I+D on chip per chip
node_compute_hw_scache = 1 MB I+D on chip per core
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_other = None
node_compute_hw_ocache = None
node_compute_hw_nthreadspercore = 1
node_compute_hw_ncpuorder = 2,3,4,6,8 chips
node_compute_hw_ncoresperchip = 28
node_compute_hw_ncores = 224
node_compute_hw_nchips = 8
node_compute_hw_model = ThinkSystem SR950
node_compute_hw_memory = 1536 GB (96 x 16 GB 2Rx8 PC4-2933Y-R)
node_compute_hw_disk = 1 x 960 GB NVMe 2.5" SSD
node_compute_hw_cpu_name = Intel Xeon Platinum 8280L
node_compute_hw_cpu_mhz = 2700
node_compute_hw_cpu_char = Intel Turbo Boost Technology up to 4.0 GHz
node_compute_hw_adapter_fs_slot_type = None
node_compute_hw_adapter_fs_ports_used = 0
node_compute_hw_adapter_fs_model = None
node_compute_hw_adapter_fs_interconnect = None
node_compute_hw_adapter_fs_firmware = None
node_compute_hw_adapter_fs_driver = None
node_compute_hw_adapter_fs_data_rate = None
node_compute_hw_adapter_fs_count = 0
node_compute_hw_accel_vendor = None
node_compute_hw_accel_type = None
node_compute_hw_accel_model = None
node_compute_hw_accel_ecc = None
node_compute_hw_accel_desc = None
node_compute_hw_accel_count = 0
node_compute_hw_accel_connect = None
node_compute_count = 1

interconnect_fs_syslbl = N/A
interconnect_fs_purpose = N/A
interconnect_fs_order = 0
interconnect_fs_label = N/A
interconnect_fs_hw_vendor = N/A
interconnect_fs_hw_topo = N/A
interconnect_fs_hw_switch_fs_ports = 0
interconnect_fs_hw_switch_fs_model = N/A
interconnect_fs_hw_switch_fs_firmware = N/A
interconnect_fs_hw_switch_fs_data_rate = N/A
interconnect_fs_hw_switch_fs_count = 0
interconnect_fs_hw_model = N/A

CC_VERSION_OPTION  = -V -c
CXX_VERSION_OPTION = -V -c
FC_VERSION_OPTION  = -V -c

%if %{model} eq 'mpi'
submit = mpirun -host localhost -genv coll_hcoll_enable 1 -genv HCOLL_MAIN_IB=mlx5_0:1 -genv UCX_TLS=sm -genv pml ucx -np $ranks $command
%elif %{model} eq 'omp'
submit = mpirun -host localhost -genv coll_hcoll_enable 1 -genv HCOLL_MAIN_IB=mlx5_0:1 -genv UCX_TLS=sm -genv pml ucx --map-by numa -np $ranks $command
%endif

%if %{VEC} eq 'novec'
vec = -no-vec
%elif %{VEC} eq 'avx2'
vec = -xCORE-AVX2
%elif %{VEC} eq 'avx512'
vec = -xCORE-AVX512
%elif %{VEC} eq 'avx512_high'
vec = -xCORE-AVX512 -qopt-zmm-usage=high
%else
vec = -xCORE-AVX512
%endif

default=base=default:
OPTIMIZE = -Ofast -no-prec-div -xCORE-AVX512 -ipo
COPTIMIZE = -ansi-alias
CXXOPTIMIZE = -ansi-alias
#PORTABILITY = -DSPEC_LP64
%if %{model} eq 'omp'
pmodel = OMP
OPTIMIZE += -qopenmp
%endif

default=peak=default:
basepeak = 1

613.soma_s=default=default:
%if %{model} eq 'omp'
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE
%endif

513.soma_t=default=default:
%if %{model} eq 'omp'
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE
%endif

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
node_compute_sw_os000 = Red Hat Enterprise Linux Server release 7.6,
node_compute_sw_os001 = Kernel 3.10.0-957.el7.x86_64
sw_mpi_library000 = Intel MPI Library for Linux* OS
sw_mpi_library001 = Version 2020 Update 9 Build 20200923
system_class = Homogenous
notes_submit_000 = The config file option 'submit' was used.
notes_submit_005 = submit = mpirun -host localhost -genv coll_hcoll_enable 1 -genv HCOLL_MAIN_IB=mlx5_0:1
notes_submit_010 = -genv UCX_TLS=sm -genv pml ucx --map-by numa -np $ranks $command
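# For reference, with --rank=32 the OpenMP submit line quoted above expands
# to the following (a sketch; the harness substitutes $command with the
# per-benchmark binary and its arguments):
#
#   mpirun -host localhost -genv coll_hcoll_enable 1 -genv HCOLL_MAIN_IB=mlx5_0:1 \
#       -genv UCX_TLS=sm -genv pml ucx --map-by numa -np 32 $command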