# Invocation command line:
# /lfs/lfs14/emelnich/SPECHPC2021/hpc2021-1.0.2/bin/harness/runhpc --rebuild --reportable --define EXPID=submission/tiny.nodes_4.mpi_48.ppn_12.omp_12 --define model=omp --define ppn=12 --iterations=3 -c intel.icx.base-peak.tiny.4nodes.cfg -T base,peak --input ref --define RANKS=48 --define THREADS=12 --flagsurl Intel-ic18.0-official-linux64.2019-04-02.xml tiny
# output_root was not used for this run
############################################################################
#!/bin/bash
#######################################################################

expid=
%ifdef %{EXPID}
expid=%{EXPID}
%endif

allow_label_override = yes   # label controls srcalt: simd - for simd
build_in_build_dir=0         # build in run dir
basepeak=0

%ifndef %{label}             # if label is not set, default to "intel"
%  define label intel
%endif

%ifndef %{model}             # if model is not set, default the parallel model to MPI
%  define pmodel MPI
%endif

teeout = yes

######################################################
# SUT Section
######################################################
#include: Example_SUT.inc
#   ----- Begin inclusion of 'Example_SUT.inc'
############################################################################
######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor = Intel
system_name   = Endeavour: Intel Server M50CYP2UR208 (Intel Xeon Platinum 8360Y)
hw_avail      = Jul-2021
sw_avail      = Jul-2021
prepared_by   = Egor Melnichenko

# Computation node info
# [Node_Description: Hardware]
node_compute_syslbl = Intel Server M50CYP2UR208 (Xeon 8360Y)
node_compute_order = 1
node_compute_count = 4
node_compute_purpose = Compute
node_compute_hw_vendor = Intel
node_compute_hw_model = Intel Server M50CYP2UR208 (Xeon 8360Y)
node_compute_hw_cpu_name = Intel Xeon Platinum 8360Y
node_compute_hw_ncpuorder = 1, 2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 72
node_compute_hw_ncoresperchip = 36
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char = Turbo Boost Technology up to 3.5 GHz
node_compute_hw_cpu_mhz = 2400
node_compute_hw_pcache = 32 KB I + 48 KB D on chip per core
node_compute_hw_scache = 1536 KB I+D on chip per core
node_compute_hw_tcache000 = 54 MB I+D on chip per chip
node_compute_hw_ocache = None
node_compute_hw_memory000 = 256 GB (16 x 16 GB 2Rx8 PC4-3200R)
node_compute_hw_disk = 1 x 960 GB SATA 2.5" SSD
node_compute_hw_other = None

# [Node_Description: Accelerator]
node_compute_hw_accel_count = None
node_compute_hw_accel_vendor = None
node_compute_hw_accel_type = None
node_compute_hw_accel_connect = None
node_compute_hw_accel_ecc = None
node_compute_hw_accel_desc = None

# [Node_Description: Network adapter]
node_compute_hw_adapter_fs_model = Mellanox ConnectX-6 HDR
node_compute_hw_adapter_fs_count = 1
node_compute_hw_adapter_fs_slot_type = PCI-Express 4.0 x16
node_compute_hw_adapter_fs_data_rate = 200 Gbit/s
node_compute_hw_adapter_fs_ports_used = 1
node_compute_hw_adapter_fs_interconnect = Mellanox HDR
node_compute_hw_adapter_fs_driver = 5.1-2.5.8.0
node_compute_hw_adapter_fs_firmware = 20.29.2002

# [Node_Description: Software]
node_compute_sw_os000 = CentOS Linux release 8.4.2105
node_compute_sw_os001 = 4.18.0-240.22.1.el8_3.crt2.x86_64
node_compute_sw_localfile = NFS
node_compute_sw_sharedfile = Lustre FS
node_compute_sw_state = Multi-user
node_compute_sw_other = --
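# Illustrative arithmetic relating the compute-node description above to
# the base-run geometry from the invocation (a consistency note, not a
# harness directive): 4 nodes x 2 chips x 36 cores/chip = 288 cores, or
# 576 hardware threads at 2 threads/core.  The base run uses
# RANKS=48 x THREADS=12 = 576, i.e. both hardware threads of every core,
# with ppn=12 placing 12 ranks on each of the 4 nodes (48 / 12 = 4).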
# [Fileserver]
node_fileserver_sw_state = Multi-user
node_fileserver_sw_sharedfile = Lustre FS
node_fileserver_sw_other = None
node_fileserver_sw_os000 = CentOS Linux release 7.8.2003
node_fileserver_sw_os001 = 4.18.0-240.22.1.el8_3.crt2.x86_64
node_fileserver_sw_localfile = None
node_fileserver_purpose = Fileserver
node_fileserver_order = 2
node_fileserver_syslbl = LustreFS
node_fileserver_hw_vendor = Intel
node_fileserver_hw_tcache = 25344 KB I+D on chip per chip
node_fileserver_hw_scache = 512 KB I+D on chip per core
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_other = None
node_fileserver_hw_ocache = None
node_fileserver_hw_nthreadspercore = 2
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_ncoresperchip = 8
node_fileserver_hw_ncores = 16
node_fileserver_hw_nchips = 2
node_fileserver_hw_model = Inspur NF5280M5
node_fileserver_hw_memory000 = 192 GB (12 x 16 GB 2Rx8 PC4-2666R)
node_fileserver_hw_disk = 1 x 1 TB 12 Gbps SAS 2.5" SSD
node_fileserver_hw_cpu_name = Intel Xeon Gold 6244
node_fileserver_hw_cpu_mhz = 3600
node_fileserver_hw_cpu_char = Intel Xeon Gold
node_fileserver_hw_adapter_fs_model = Mellanox ConnectX-4 EDR
node_fileserver_hw_adapter_fs_slot_type = PCI-Express 4.0 x16
node_fileserver_hw_adapter_fs_ports_used = 2
node_fileserver_hw_adapter_fs_interconnect = Mellanox EDR
node_fileserver_hw_adapter_fs_firmware = 20.29.2002
node_fileserver_hw_adapter_fs_driver = 5.1-2.5.8.0
node_fileserver_hw_adapter_fs_data_rate = 100 Gb/s
node_fileserver_hw_adapter_fs_count = 1
node_fileserver_count = 1

# [Interconnect]
interconnect_fs_syslbl = Mellanox HDR
interconnect_fs_order = 0
interconnect_fs_purpose = MPI Traffic
interconnect_fs_hw_vendor = Mellanox
interconnect_fs_hw_model = Mellanox HDR
interconnect_fs_hw_switch_fs_model000 = Mellanox MQM8790-HS2F Quantum HDR
interconnect_fs_hw_switch_fs_model001 = InfiniBand Switch
interconnect_fs_hw_switch_fs_count = 18
interconnect_fs_hw_switch_fs_ports = 40
interconnect_fs_hw_topo = Fat-tree
interconnect_fs_hw_switch_fs_data_rate = 200 Gbit/s
interconnect_fs_hw_switch_fs_firmware = 20.29.2002

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
#######################################################################
#   ---- End inclusion of '/lfs/lfs14/emelnich/SPECHPC2021/hpc2021-1.0.2/config/Example_SUT.inc'

# System Description
system_class = Homogenous Cluster

# CPU description

# Cache description

# Tester description
license_num  = 13
test_sponsor = Intel
tester       = Intel

# Operating system, file system
sw_mpi_other = None
sw_other     = None

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
#######################################################################

######################################################################
# The header section of the config file.  Must appear
# before any instances of "section markers" (see below)
#
#   ext = how the binaries you generated will be identified
#   tune = specify "base" or "peak" or "all"
label         = %{label}_%{model}
tune          = all
output_format = text
use_submit_for_speed = 1
allow_label_override = yes

default:
AR      = ar
ARFLAGS = cr
CC  = mpiicc -cc=icx
CXX = mpiicpc -cxx=icx
FC  = mpiifort -fc=ifx
sw_compiler = Intel oneAPI Compiler 2021.3.0
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

# Optimization
# Note that SPEC baseline rules require that all uses of a given compiler
# use the same flags in the same order.  See the SPEChpc Run Rules
# for more details:
#   http://www.spec.org/hpc2021/Docs/runrules.html
#
#   OPTIMIZE    = flags applicable to all compilers
#   COPTIMIZE   = flags applicable to the C compiler
#   CXXOPTIMIZE = flags applicable to the C++ compiler
#   FOPTIMIZE   = flags applicable to the Fortran compiler
#
# See your compiler manual for information on the flags available
# for your compiler.
vec_novec                   = -no-vec
vec_avx2                    = -xCORE-AVX2
vec_avx512                  = -xCORE-AVX512
vec_avx512_high             = -xCORE-AVX512 -mprefer-vector-width=512
vec_avx512_streaming_stores = -xCORE-AVX512 -mllvm -hir-nontemporal-cacheline-count=0
vec                         = -xCORE-AVX512 -mprefer-vector-width=512

default=base,peak:
OPTIMIZE    = -Ofast -ipo ${vec} -fiopenmp
COPTIMIZE   = -ansi-alias
CXXOPTIMIZE = -ansi-alias
PORTABILITY = -lstdc++
FOPTIMIZE   = -nostandard-realloc-lhs -align array64byte
mpicmd = mpiexec.hydra -bootstrap ssh -np $ranks -ppn $ppn -f \$HOSTFILE $command
submit = $mpicmd
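# Illustrative expansion of the submit line above (a sketch, assuming the
# harness substitutes $ranks, $ppn, and $command, and that $HOSTFILE is set
# in the run environment).  For the base run, with RANKS=48 and ppn=12 from
# the invocation, each benchmark is launched roughly as:
#
#   mpiexec.hydra -bootstrap ssh -np 48 -ppn 12 -f $HOSTFILE <benchmark command>
#
# i.e. 12 ranks on each of the 4 compute nodes, with the per-rank OpenMP
# width taken from threads = %{THREADS} = 12.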
default=base=default:
ranks   = %{RANKS}
threads = %{THREADS}
ppn     = %{ppn}
%if %{model} eq 'omp'
pmodel = OMP
%endif

505.lbm_t=peak:
ranks   = 24
threads = 12
ppn     = 6
vec     = ${vec_avx512_high}
pmodel  = OMP

513.soma_t=peak:
ranks   = 8
threads = 72
ppn     = 2
vec     = ${vec_avx512}
pmodel  = OMP

518.tealeaf_t=peak:
ranks   = 24
threads = 12
ppn     = 6
vec     = ${vec_avx512_high}
pmodel  = OMP

519.clvleaf_t=peak:
ranks   = 16
threads = 18
ppn     = 4
vec     = ${vec_avx512_streaming_stores}
pmodel  = OMP

521.miniswp_t=peak:
ranks   = 8
threads = 36
ppn     = 2
vec     = ${vec_avx512_high}
pmodel  = OMP

528.pot3d_t=peak:
ranks   = 48
threads = 6
ppn     = 12
vec     = ${vec_avx512_high}
pmodel  = OMP

532.sph_exa_t=peak:
ranks   = 24
threads = 12
ppn     = 6
vec     = ${vec_avx512_high}
pmodel  = OMP

534.hpgmgfv_t=peak:
ranks   = 8
threads = 72
ppn     = 2
vec     = ${vec_novec}
pmodel  = OMP

535.weather_t=peak:
ranks   = 72
threads = 4
ppn     = 18
vec     = ${vec_avx512_high}
pmodel  = OMP

513.soma_t=base,peak:
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/hpc2021/flags/Intel-oneAPI-icx2021-official-linux64.xml
sw_mpi_library000 = Intel MPI Library for Linux* OS, Version 2021.2
sw_mpi_library001 = Build 20210302
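# Illustrative arithmetic for the peak rank/thread geometries above (a
# consistency sketch, not additional harness input).  Every peak section
# keeps ranks / ppn = 4 nodes; for example:
#   505.lbm_t:     6 ranks/node x 12 threads =  72 = one thread per core
#   534.hpgmgfv_t: 2 ranks/node x 72 threads = 144 = both SMT threads per core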