# Invocation command line:
# /mnt/share/hpc2021/bin/harness/runhpc --reportable --define EXPID=submission/tiny.omp.8.30 --define model=omp --iterations=3 -c xfusion.icx.base-peak.cfg --tune=base,peak --define RANKS=8 --define THREADS=30 --size=ref --flagsurl ./config/flags/Intel-oneAPI-icx2021-official-linux64.xml --output_format=all tiny
# output_root was not used for this run
############################################################################
#!/bin/bash
######################################################################
# Example configuration file for the Intel 2021 Compilers
#
# Defines: "model" => "mpi", "omp", default "mpi"
#          "label" => ext base label, default "xfusion"
#
# MPI-only Command:
# runhpc --rebuild --reportable --define EXPID=submission/tiny.mpi.rank_120 --define model=mpi --iterations=3 -c xfusion.icx.base-peak.cfg -T base --input ref --define RANKS=120 --output_format=all tiny
#
# OpenMP Command:
# runhpc --rebuild --reportable --define EXPID=submission/tiny.omp.rank_16.thread_15 --define model=omp --iterations=3 -c xfusion.icx.base-peak.cfg -T base,peak --input ref --define RANKS=16 --define THREADS=15 --output_format=all tiny
#
#######################################################################

expid=
%ifdef %{EXPID}
expid=%{EXPID}
%endif

allow_label_override = yes  # label controls srcalt: simd - for simd
build_in_build_dir=0        # build in run dir
basepeak=0

%ifndef %{label}            # IF label is not set use xfusion
%   define label xfusion
%endif

%ifndef %{model}            # IF model is not set use mpi
%   define pmodel MPI
%endif

teeout = yes
makeflags=-j

# Tester description
license_num = 6488
showtimer = 0
test_sponsor = xFusion
tester = xFusion

######################################################
# SUT Section
######################################################
#include: Example_SUT.inc
#  ----- Begin inclusion of 'Example_SUT.inc'
############################################################################

######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor = xFusion
system_name = xFusion 2288H V7 (Intel Xeon Platinum 8490H)
node_compute_sw_accel_driver = N/A
hw_avail = Jan-2023
sw_avail = Nov-2022
prepared_by = xFusion

# Computation node info
# [Node_Description: Hardware]
node_compute_syslbl = xFusion 2288H V7
node_compute_order = 1
node_compute_count = 1
node_compute_purpose = Compute
node_compute_hw_vendor = xFusion
node_compute_hw_model = xFusion 2288H V7
node_compute_hw_cpu_name = Intel Xeon Platinum 8490H
node_compute_hw_ncpuorder = 2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 120
node_compute_hw_ncoresperchip = 60
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char = Turbo Boost Technology up to 3.5 GHz
node_compute_hw_cpu_mhz = 1900
node_compute_hw_pcache = 32 KB I + 48 KB D on chip per core
node_compute_hw_scache = 2 MB I+D on chip per core
node_compute_hw_tcache = 112.5 MB I+D on chip per chip
node_compute_hw_ocache = None
node_compute_hw_memory = 1 TB (16 x 64 GB 2Rx4 PC5-4800B-R)
node_compute_hw_disk = 1 x 7.68 TB NVMe SSD
node_compute_hw_other = None

#[Node_Description: Accelerator]

#[Node_Description: Software]
node_compute_hw_adapter_fs_model = N/A
node_compute_hw_adapter_fs_count = 0
node_compute_hw_adapter_fs_slot_type = N/A
node_compute_hw_adapter_fs_data_rate = N/A
node_compute_hw_adapter_fs_ports_used = 0
node_compute_hw_adapter_fs_interconnect = N/A
node_compute_hw_adapter_fs_driver = N/A
node_compute_hw_adapter_fs_firmware = N/A
node_compute_sw_os000 = CentOS Linux release 8.2.2004
node_compute_sw_os001 = 4.18.0-193.el8.x86_64
node_compute_sw_localfile = xfs
node_compute_sw_sharedfile = N/A
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_other = N/A

#[Fileserver]

#[Interconnect]
interconnect_fs_syslbl = N/A
interconnect_fs_order = 0
interconnect_fs_purpose = N/A
interconnect_fs_hw_vendor = N/A
interconnect_fs_hw_model = N/A
interconnect_fs_hw_switch_fs_model = N/A
interconnect_fs_hw_switch_fs_count = 0
interconnect_fs_hw_switch_fs_ports = 0
interconnect_fs_hw_topo = N/A
interconnect_fs_hw_switch_fs_data_rate = N/A
interconnect_fs_hw_switch_fs_firmware = N/A

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################
#  ---- End inclusion of '/panfs/projects/innl/emelnich/SPECMPIACCEL/SPECMPIACCEL_SUITE/kit39-Release-1.0.2/hpc2021/config/Example_SUT.inc'

######################################################################
# The header section of the config file. Must appear
# before any instances of "section markers" (see below)
#
# ext = how the binaries you generated will be identified
# tune = specify "base" or "peak" or "all"
label = %{label}_%{model}
tune = all
output_format = all
use_submit_for_speed = 1

# Compiler Settings
default:
AR = ar
ARFLAGS = cr
CC = mpiicc -cc=icx
CXX = mpiicpc -cxx=icx
FC = mpiifort -fc=ifx
system_class = Homogeneous Cluster
sw_other = N/A
sw_compiler = Intel oneAPI Compiler 2022.2.1
sw_mpi_other = N/A

# Compiler Version Flags
CC_VERSION_OPTION = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION = --version
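# Illustrative sketch, not part of the submitted configuration: given the
# CC/CXX/FC wrappers and the *_VERSION_OPTION settings above, the compiler
# version strings recorded in the report are expected to come from commands
# equivalent to the following, run on the build host:
#
#   mpiicc   -cc=icx  --version
#   mpiicpc  -cxx=icx --version
#   mpiifort -fc=ifx  --version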
# Optimization
# Note that SPEC baseline rules require that all uses of a given compiler
# use the same flags in the same order. See the SPEChpc Run Rules
# for more details
# http://www.spec.org/hpc2021/Docs/runrules.html
#
# OPTIMIZE    = flags applicable to all compilers
# COPTIMIZE   = flags applicable to the C compiler
# CXXOPTIMIZE = flags applicable to the C++ compiler
# FOPTIMIZE   = flags applicable to the Fortran compiler
#
# See your compiler manual for information on the flags available
# for your compiler

vec_novec=-no-vec
vec_avx2=-xCORE-AVX2
vec_avx512=-xCORE-AVX512
vec_avx512_high=-xCORE-AVX512 -mprefer-vector-width=512
vec_avx512_streaming_stores=-xCORE-AVX512 -mllvm -hir-nontemporal-cacheline-count=0
vec_avx512_high_exp1=-xCORE-AVX512 -mprefer-vector-width=512 -ffast-math
vec_avx512_high_exp2=-xCORE-AVX512 -mprefer-vector-width=512 -flto
vec_avx512_high_exp3=-xCORE-AVX512 -mprefer-vector-width=512 -funroll-loops
vec_avx512_high_exp4=-xCORE-AVX512 -mprefer-vector-width=512 -ffast-math -flto -funroll-loops
vec_avx512_high_exp5=-xCORE-AVX512 -mprefer-vector-width=512 -ffinite-math-only
vec_avx512_high_exp6=-xCORE-AVX512 -mprefer-vector-width=512 -fimf-precision=low:sin,sqrt
vec_avx512_high_exp7=-xCORE-AVX512 -mprefer-vector-width=512 -ffinite-math-only -fimf-precision=low:sin,sqrt -ffast-math -flto -funroll-loops
vec_common512=-xCOMMON-AVX512
vec=-xCORE-AVX512 -mprefer-vector-width=512

default=base,peak:
OPTIMIZE = -Ofast -ipo ${vec} -fiopenmp
COPTIMIZE = -ansi-alias
CXXOPTIMIZE = -ansi-alias
PORTABILITY = -lstdc++
FOPTIMIZE = -nostandard-realloc-lhs -align array64byte
mpicmd = mpiexec.hydra -bootstrap ssh --bind-to core -np $ranks -genv OMP_NUM_THREADS=$threads $command
submit = $mpicmd

default=base=default:
ranks = %{RANKS}
threads = %{THREADS}

%if %{model} eq 'omp'
pmodel=OMP
%endif

505.lbm_t=peak:
ranks=60
threads=4
vec=${vec_avx512_high}
pmodel=OMP

513.soma_t=peak:
ranks=2
threads=120
vec=${vec_avx512_high}
pmodel=OMP

518.tealeaf_t=peak:
basepeak=1

519.clvleaf_t=peak:
ranks=60
threads=4
vec=${vec_avx512_streaming_stores}
pmodel=OMP

521.miniswp_t=peak:
ranks=4
threads=60
vec=${vec_avx512_high}
pmodel=OMP

528.pot3d_t=peak:
ranks=120
threads=2
vec=${vec_avx512_high}
pmodel=OMP

532.sph_exa_t=peak:
ranks=30
threads=8
vec=${vec_avx512_high_exp1}
pmodel=OMP

534.hpgmgfv_t=peak:
basepeak=1

535.weather_t=peak:
ranks=40
threads=6
vec=${vec_avx512_high}
pmodel=OMP

513.soma_t=base,peak:
PORTABILITY+=-DSPEC_NO_VAR_ARRAY_REDUCE

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/hpc2021/flags/Intel-oneAPI-icx2021-official-linux64.2023-01-27.xml
sw_mpi_library000 = Intel MPI Library for Linux* OS, Version 2022.2.1
sw_mpi_library001 = Build 20221020
notes_submit_000 =export LD_PRELOAD="/usr/lib64/libhugetlbfs.so $LD_PRELOAD"
notes_submit_005 =export OMP_PROC_BIND=true
notes_submit_010 =mpiexec.hydra -bootstrap ssh --bind-to core -np $ranks -genv OMP_NUM_THREADS=$threads $command
notes_000 =
notes_005 =Submitted_by: luxu
notes_010 =Submitted: Thu Jan 26 12:25:09 EST 2023
notes_015 =Submission: hpc2021-20230108-00156.sub
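# Illustrative sketch, not part of the submitted configuration: for the
# reported base run (--define RANKS=8 --define THREADS=30), the submit/mpicmd
# template above is expected to expand to a launch sequence of roughly the
# following form, where $command stands for the benchmark binary and its
# arguments:
#
#   export LD_PRELOAD="/usr/lib64/libhugetlbfs.so $LD_PRELOAD"
#   export OMP_PROC_BIND=true
#   mpiexec.hydra -bootstrap ssh --bind-to core -np 8 -genv OMP_NUM_THREADS=30 $command
#
# Peak runs substitute the per-benchmark rank/thread overrides instead
# (for example, 505.lbm_t peak uses 60 ranks x 4 OpenMP threads).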