# Invocation command line:
# /data/caar/spec/hpc2021-1.0.2/bin/harness/runhpc --config=intel.cfg --reportable --tune=base -ranks 40 --threads 2 --define model=mpi tiny
# output_root was not used for this run
############################################################################
#!/bin/bash
######################################################################
# Example configuration file for the Intel 2020 Compilers
#
# Defines: "model" => "mpi", "omp", default "mpi"
#          "label" => ext base label, default "intel"
#
# MPI-only Command:
# runhpc -c Example_intel --reportable -I -l -n 1 -T base -i test,ref --define model=mpi --ranks=72 tiny
#
# OpenMP Command:
# runhpc -c Example_intel --reportable -I -l -n 1 -T base -i test,ref --define model=omp --threads=8 --ranks=4 tiny
#
#######################################################################

allow_label_override = yes   # label controls srcalt: simd - for simd

%ifndef %{label}             # IF label is not set use intel
%  define label intel
%endif

%ifndef %{model}             # IF model is not set use mpi
%  define model mpi
%endif
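
# Illustration only (not part of the recorded run): both defines above can be
# overridden on the runhpc command line. For example, a hypothetical OpenMP
# build with a custom label could be requested with
#
#   runhpc -c intel.cfg --define model=omp --define label=mytest --threads=8 --ranks=4 tiny
#
# The reportable run recorded at the top of this file used --define model=mpi
# and the default label "intel", giving the extension "intel_mpi".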

teeout = yes
makeflags = -j 80

######################################################
# SUT Section
######################################################
#include: Example_SUT.inc
#  ----- Begin inclusion of 'Example_SUT.inc'
############################################################################
######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor = Transtec
system_name   = Hemera: Intel Server Board S2600BPB (Intel Xeon Gold 6148)
hw_avail      = Nov-2099
sw_avail      = Nov-2099

# Computation node info
# [Node_Description: Hardware]
node_compute_syslbl = Compute Node
node_compute_order = 1
node_compute_count = 1
node_compute_purpose = compute
node_compute_hw_vendor = Intel
node_compute_hw_model = Intel Server Board S2600BPB
node_compute_hw_cpu_name = Intel Xeon Gold 6148
node_compute_hw_ncpuorder = 1 or 2 per node
node_compute_hw_nchips = 2
node_compute_hw_ncores = 40
node_compute_hw_ncoresperchip = 20
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char = Intel Turbo Boost Technology up to 3.7 GHz
node_compute_hw_cpu_mhz = 2400
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 1 MB I+D on chip per core
node_compute_hw_tcache000 = 28160 KB I+D on chip per chip
node_compute_hw_ocache = None
node_compute_hw_memory = 384 GB (12 x 32GB 2Rx4 PC4-2666V-RB2-12)
node_compute_hw_disk = 1 x 500 GB SSD
node_compute_hw_other = None

# [Node_Description: Accelerator]
node_compute_hw_accel_model = --
node_compute_hw_accel_count = 0
node_compute_hw_accel_vendor = --
node_compute_hw_accel_type = --
node_compute_hw_accel_connect = --
node_compute_hw_accel_ecc = --
node_compute_hw_accel_desc = --

# [Node_Description: Software]
node_compute_hw_adapter_fs_model = Mellanox MT4115
node_compute_hw_adapter_fs_count = 2
node_compute_hw_adapter_fs_slot_type = PCI-Express 3.0 x16
node_compute_hw_adapter_fs_data_rate = 100 Gb/s
node_compute_hw_adapter_fs_ports_used = 2
node_compute_hw_adapter_fs_interconnect = EDR InfiniBand
node_compute_hw_adapter_fs_driver = --
node_compute_hw_adapter_fs_firmware = 12.28.2006
node_compute_sw_os000 = CentOS Linux release 7.9.2009 (Core)
node_compute_sw_os001 = 3.10.0-1160.6.1.el7.x86_64
node_compute_sw_localfile = xfs
node_compute_sw_sharedfile000 = GPFS Version 5.0.5.0
node_compute_sw_sharedfile001 = 6 NSD (vendor: NEC)
node_compute_sw_sharedfile002 = 5 building blocks (vendor: NetApp):
node_compute_sw_sharedfile003 = 2x (240 x 8 TB HDD)
node_compute_sw_sharedfile004 = 1x (180 x 12 TB HDD)
node_compute_sw_sharedfile005 = 1x (240 x 16 TB HDD)
node_compute_sw_sharedfile006 = 1x (120 x 16 TB HDD)
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_other = None

# [Fileserver]

# [Interconnect]
interconnect_fs_syslbl = InfiniBand (EDR)
interconnect_fs_order = 1
interconnect_fs_purpose = MPI Traffic, GPFS
interconnect_fs_hw_vendor = Mellanox Technologies
interconnect_fs_hw_model = Mellanox SB7790
interconnect_fs_hw_switch_fs_model000 = 36 x EDR 100 Gb/s
interconnect_fs_hw_switch_fs_count = 2
interconnect_fs_hw_switch_fs_ports = 36
interconnect_fs_hw_topo = Mesh (blocking factor: 8:1)
interconnect_fs_hw_switch_fs_data_rate = 100 Gb/s
interconnect_fs_hw_switch_fs_firmware = --

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################
#  ---- End inclusion of '/data/caar/spec/hpc2021-1.0.2/config/Example_SUT.inc'

# System Description
system_class = Homogeneous Cluster

# CPU description

# Cache description

# Tester description
license_num  = 065A
test_sponsor = Helmholtz-Zentrum Dresden - Rossendorf
tester       = Helmholtz-Zentrum Dresden - Rossendorf

# Operating system, file system
sw_other = None

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################

######################################################################
# The header section of the config file. Must appear
# before any instances of "section markers" (see below)
#
# ext  = how the binaries you generated will be identified
# tune = specify "base" or "peak" or "all"
label         = %{label}_%{model}
tune          = base
output_format = text
use_submit_for_speed = 1

default:
AR      = ar
ARFLAGS = cr
CC      = mpiicc
CXX     = mpiicpc
FC      = mpiifort

sw_compiler = Intel Parallel Studio XE 2020
hw_avail    = Jul-2017
sw_avail    = Oct-2020
prepared_by = Jeffrey Kelling

CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

%if %{model} eq 'mpi'
MPIRUN_OPTS = --bind-to core
%endif
%if %{model} eq 'omp'
MPIRUN_OPTS = --bind-to socket
%endif
submit = mpiexec.hydra ${MPIRUN_OPTS} -np $ranks $command
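
# Illustration (assumed expansion, not copied from the run logs): with the MPI
# model and the 40 ranks used in the invocation recorded at the top of this
# file, the submit line above resolves to approximately
#
#   mpiexec.hydra --bind-to core -np 40 $command
#
# where $command stands for the benchmark executable and its arguments as
# supplied by the harness.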

#######################################################################
# Optimization
# Note that SPEC baseline rules require that all uses of a given compiler
# use the same flags in the same order. See the SPEChpc Run Rules
# for more details:
#       http://www.spec.org/hpc2021/Docs/runrules.html
#
# OPTIMIZE    = flags applicable to all compilers
# COPTIMIZE   = flags applicable to the C compiler
# CXXOPTIMIZE = flags applicable to the C++ compiler
# FOPTIMIZE   = flags applicable to the Fortran compiler
#
# See your compiler manual for information on the flags available
# for your compiler.
default=base=default:
OPTIMIZE    = -Ofast -xCORE-AVX512
COPTIMIZE   = -ansi-alias
CXXOPTIMIZE = -ansi-alias
PORTABILITY = -DSPEC_LP64

%if %{model} eq 'mpi'
pmodel = MPI
%endif
%if %{model} eq 'omp'
pmodel = OMP
OPTIMIZE += -qopenmp
%endif

513.soma_t:
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE

default=peak=default:
basepeak = 1

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/hpc2021/flags/EM64T_Intel_flags.xml
notes_000 = This benchmark result is intended to provide perspective on
notes_005 = past performance using the historical hardware and/or
notes_010 = software described on this result page.
notes_015 =
notes_020 = The system as described on this result page was formerly
notes_025 = generally available. At the time of this publication, it may
notes_030 = not be shipping, and/or may not be supported, and/or may fail
notes_035 = to meet other tests of General Availability described in the
notes_040 = SPEC HPG Policy document, http://www.spec.org/hpg/policy.html
notes_045 =
notes_050 = This measured result may not be representative of the result
notes_055 = that would be measured were this benchmark run with hardware
notes_060 = and software available as of the publication date.
notes_065 =
notes_submit_000 = The config file option 'submit' was used.
notes_submit_005 = MPI startup command:
notes_submit_010 =   mpiexec.hydra --bind-to core -np $ranks $command
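
# Illustration (assumed composition, not copied from the build logs): with the
# base settings above and the MPI model, a C benchmark is compiled roughly as
#
#   mpiicc -DSPEC_LP64 -Ofast -xCORE-AVX512 -ansi-alias ...
#
# Exact flag order and any additional harness-generated options may differ;
# the flags file referenced by flagsurl000 above is the authoritative
# description of the reported flags.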