# Invocation command line:
# /home/qlogic/huiyu/mpi2007/bin/runspec --define hosts=/home/qlogic/huiyu/mpi2007/hosts --ranks=512 --extension=infinipath --reportable --action validate -I -c qlogic-amd64-pathscale-infinipath -i mref -n 3 -T base -o asc medium -o asc,cfg,csv,txt
# output_root was not used for this run
############################################################################
#####################################################################
# This is a sample config file. It was tested with:
#
#   Compiler name/version:     QLogic PathScale compilers version 3.0
#   Communications software:   QLogic InfiniPath software stack
#                              versions 2.0 and 2.1
#   Operating system version:  SLES 10 (x86-64)
#   Node Hardware:             AMD Opteron and Intel Xeon x64 servers
#   Interconnect adapters:     QLogic InfiniPath InfiniBand
#                              (PCI Express and HTX) adapters
#
# If your platform uses different versions, different
# hardware or operates in a different mode (for
# example, 32- vs. 64-bit mode), there is the possibility
# that this configuration file may not work as-is.
#
# Note that issues with compilation should be directed
# to the compiler vendor. Information about SPEC technical
# support can be found in the techsupport document in the
# Docs directory of your benchmark installation.
#
# Also note that this is a sample configuration. It
# is expected to work for the environment in which
# it was tested; it is not guaranteed that this is
# the config file that will provide the best performance.
#
# Note that you might find a more recent config file for
# your platform with the posted results at
# www.spec.org/mpi2007
#####################################################################
# A runspec line something like the following will supply
# the information needed in the submit command below:
#   runspec --config qlogic-linux-x86_64-pathscale-infinipath \
#           --define hosts=$SPEC/hostfile --ranks=24
#
# A flags file such as the following can be downloaded from
# http://www.spec.org/mpi2007/flags/ and put in your top-level
# $SPEC directory and then uncomment this line:
flagsurl000=http://www.spec.org/mpi2007/flags/MPI2007_flags.20070717.00.xml
######################################################################

makeflags            = -j 4
env_vars             = yes
use_submit_for_speed = yes
ext                  = infinipath
teeout               = yes
teerunout            = yes
mean_anyway          = yes

MPI_HOME = /usr
CC       = $(MPI_HOME)/bin/mpicc  -cc=pathcc   -march=opteron
FC       = $(MPI_HOME)/bin/mpif90 -f90=pathf90 -march=opteron
CXX      = $(MPI_HOME)/bin/mpicxx -CC=pathCC   -march=opteron
ACML_DIR = /net/files/tools/acml/x86_64/acml3.5.0/pathscale64/lib

submit = $[MPI_HOME]/bin/mpirun -m %{hosts} -np $ranks -disable-mpi-progress-check $command

EXTRA_LDFLAGS = -IPA:max_jobs=4
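# Illustration only: the file named by --define hosts is handed to mpirun
# through "-m" as a machinefile. With InfiniPath MPI this is typically a
# plain list of node hostnames, one per line. The hostnames and rank count
# below are made-up examples, not the tested cluster:
#
#   $ cat $SPEC/hostfile
#   node001
#   node002
#   node003
#   node004
#
# With --ranks=16, the submit line above would then expand to roughly:
#
#   /usr/bin/mpirun -m $SPEC/hostfile -np 16 -disable-mpi-progress-check <benchmark command>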
#####################################################################
# Portability
#####################################################################
default:
EXTRA_CPORTABILITY = -DSPEC_MPI_LP64

115.fds4:
CPORTABILITY = -DSPEC_MPI_LC_TRAILING_DOUBLE_UNDERSCORE

121.pop2:
CPORTABILITY = -DSPEC_MPI_DOUBLE_UNDERSCORE

127.wrf2:
CPORTABILITY = -DF2CSTYLE -DSPEC_MPI_DOUBLE_UNDERSCORE \
               -DSPEC_MPI_LINUX

130.socorro:
FPORTABILITY = -fno-second-underscore

#################################################################
# Optimization flags and Notes
#################################################################
default=base:
COPTIMIZE = -Ofast
FOPTIMIZE = -O3 -OPT:Ofast -OPT:malloc_alg=1 -LANG:copyinout=off

license_num      = 0018
prepared_by      = QLogic
sw_auto_parallel = No
sw_avail         = Jul-2007

CXXOPTIMIZE = -O3 -OPT:Ofast -CG:local_fwd_sched=on
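# Illustration only: with the compiler and flag settings above, the SPEC
# tools compile the C sources with a command along these lines (the file
# name is a placeholder; the exact commands appear in the build log):
#
#   /usr/bin/mpicc -cc=pathcc -march=opteron -c -Ofast -DSPEC_MPI_LP64 file.c
#
# Because PathScale -Ofast enables interprocedural analysis (IPA), the final
# link is where most back-end compilation happens; EXTRA_LDFLAGS lets up to
# four IPA compile jobs run in parallel there, roughly:
#
#   /usr/bin/mpicc -cc=pathcc -march=opteron -Ofast -IPA:max_jobs=4 *.o -o benchmark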
sw_other        = None
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit

system_name000 = AMD Emerald Cluster: AMD Opteron CPUs,
system_name001 = QLogic InfiniPath/SilverStorm Interconnect
system_vendor  = AMD, QLogic Corporation, Rackable Systems, IWILL

node_compute_hw_adapter_1g_slot_type    = integrated on motherboard
node_compute_hw_adapter_1g_ports_used   = 1
node_compute_hw_adapter_1g_model        = Intel 82541PI Gigabit Ethernet controller
node_compute_hw_adapter_1g_interconnect = Ethernet
node_compute_hw_adapter_1g_firmware     = None
node_compute_hw_adapter_1g_driver       = Part of Linux kernel modules
node_compute_hw_adapter_1g_data_rate    = 1 Gbps Ethernet
node_compute_hw_adapter_1g_count        = 1

test_date    = May-2007
test_sponsor = QLogic Corporation
tester       = QLogic Performance Engineering
system_class = Homogenous

node_fileserver_sw_state           = Multi-User
node_fileserver_sw_sharedfile      = NFS
node_fileserver_sw_other           = Sun Grid Engine 6.0
node_fileserver_sw_localfile       = Linux ext3
node_fileserver_purpose            = file server, other
node_fileserver_order              = 2
node_fileserver_label              = Headnode NFS filesystem
node_fileserver_hw_vendor          = Tyan
node_fileserver_hw_tcache          = None
node_fileserver_hw_scache          = 1 MB I+D on chip per core
node_fileserver_hw_pcache          = 64 KB I + 64 KB D on chip per core
node_fileserver_hw_other           = None
node_fileserver_hw_ocache          = None
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_ncpuorder       = 1-4 chips
node_fileserver_hw_ncoresperchip   = 2
node_fileserver_hw_ncores          = 8
node_fileserver_hw_nchips          = 4
node_fileserver_hw_model           = Thunder K8QSD Pro (S4882) motherboard
node_fileserver_hw_memory          = 16 GB (16 x 1 GB DDR400 dimms)
node_fileserver_hw_cpu_name        = AMD Opteron 885
node_fileserver_hw_cpu_mhz         = 2600
node_fileserver_hw_adapter_ge_slot_type    = integrated on motherboard
node_fileserver_hw_adapter_ge_ports_used   = 2
node_fileserver_hw_adapter_ge_model        = Broadcom BCM5704C
node_fileserver_hw_adapter_ge_interconnect = Ethernet
node_fileserver_hw_adapter_ge_firmware     = None
node_fileserver_hw_adapter_ge_driver       = Part of Linux kernel modules
node_fileserver_hw_adapter_ge_data_rate    = 1 Gbps Ethernet
node_fileserver_hw_adapter_ge_count        = 2
node_fileserver_hw_adapter         = Gigabit Ethernet
node_fileserver_count              = 1

node_compute_sw_state           = Multi-User
node_compute_sw_sharedfile      = NFS
node_compute_sw_other           = Sun Grid Engine 6.0
node_compute_sw_localfile       = Linux ext3
node_compute_purpose            = compute, head
node_compute_order              = 1
node_compute_label              = Rackable, IWILL, AMD
node_compute_hw_vendor          = Rackable Systems, IWILL, AMD
node_compute_hw_tcache          = None
node_compute_hw_scache          = 1 MB I+D on chip per core
node_compute_hw_pcache          = 64 KB I + 64 KB D on chip per core
node_compute_hw_ocache          = None
node_compute_hw_nthreadspercore = 1
node_compute_hw_ncpuorder       = 1-2 chips
node_compute_hw_ncoresperchip   = 2
node_compute_hw_ncores          = 4
node_compute_hw_nchips          = 2
node_compute_hw_memory          = 8 GB (8 x 1 GB DDR400)
node_compute_hw_disk            = 250 GB, SATA
node_compute_hw_cpu_name        = AMD Opteron 290
node_compute_hw_cpu_mhz         = 2800
node_compute_hw_adapter_ib_slot_type    = HTX
node_compute_hw_adapter_ib_ports_used   = 1
node_compute_hw_adapter_ib_model        = QLogic InfiniPath QHT7140
node_compute_hw_adapter_ib_interconnect = InfiniBand
node_compute_hw_adapter_ib_firmware     = None
node_compute_hw_adapter_ib_driver       = InfiniPath 2.1
node_compute_hw_adapter_ib_data_rate    = InfiniBand 4x SDR
node_compute_hw_adapter_ib_count        = 1
node_compute_count              = 128

interconnect_ib_purpose = MPI traffic
interconnect_ib_order   = 1
interconnect_ib_label   = QLogic InfiniBand HCAs and switches
interconnect_ib_hw_vendor = QLogic
interconnect_ib_hw_topo   = Single switch (star)
interconnect_ib_hw_switch_9120_ports     = 144
interconnect_ib_hw_switch_9120_model     = QLogic SilverStorm 9120 Fabric Director
interconnect_ib_hw_switch_9120_firmware  = 3.4.0.5.2
interconnect_ib_hw_switch_9120_data_rate = InfiniBand 4x SDR and InfiniBand 4x DDR
interconnect_ib_hw_switch_9120_count     = 1
interconnect_ib_hw_model  = InfiniPath and SilverStorm

interconnect_fs_purpose = file system traffic
interconnect_fs_order   = 2
interconnect_fs_label   = Broadcom NICs, Force10 switches
interconnect_fs_hw_vendor = Force10
interconnect_fs_hw_topo   = Single switch (star)
interconnect_fs_hw_switch_E300_ports     = 288
interconnect_fs_hw_switch_E300_model     = Force10 E300 Gig-E switch
interconnect_fs_hw_switch_E300_firmware  = N/A
interconnect_fs_hw_switch_E300_data_rate = 1 Gbps Ethernet
interconnect_fs_hw_switch_E300_count     = 1
interconnect_fs_hw_model  = E300

sw_preprocessors = No
sw_mpi_other     = None
sw_mpi_library   = QLogic InfiniPath MPI 2.1
sw_f_compiler    = QLogic PathScale Fortran Compiler 3.0
sw_cxx_compiler  = QLogic PathScale C++ Compiler 3.0
sw_c_compiler    = QLogic PathScale C Compiler 3.0

104.milc=peak:
basepeak=true

107.leslie3d=peak:
FOPTIMIZE = -Ofast -OPT:unroll_size=256

113.GemsFDTD=peak:
basepeak=true

115.fds4=peak:
basepeak=true

121.pop2=peak:
basepeak=true

122.tachyon=peak:
basepeak=true

126.lammps=peak:
basepeak=true

127.wrf2=peak:
basepeak=true

128.GAPgeofem=peak:
basepeak=true

129.tera_tf=peak:
FOPTIMIZE = -O3 -OPT:Ofast -OPT:malloc_alg=1 -OPT:unroll_size=256

130.socorro=peak=default=default:
COPTIMIZE  = -Ofast -OPT:malloc_alg=1
FOPTIMIZE  = -O3 -OPT:Ofast -OPT:malloc_alg=1 -LANG:copyinout=off
RM_SOURCES = specblas.F90 specbessel.c
EXTRA_LIBS = -L$(ACML_DIR) -lacml

132.zeusmp2=peak:
basepeak=true

137.lu=peak:
basepeak=true

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
hw_avail = Nov-2006
node_compute_hw_model000 = Rackable Systems C1000 chassis, IWILL DK8-HTX
node_compute_hw_model001 = motherboard
node_compute_hw_other000 = Nodes custom-built by Rackable Systems. The
node_compute_hw_other001 = Rackable C1000 chassis is half-depth with 450W,
node_compute_hw_other002 = 48 VDC Power Supply. Integrated Gigabit Ethernet
node_compute_hw_other003 = for admin/filesystem.
node_compute_sw_os000 = ClusterCorp Rocks 4.2.1
node_compute_sw_os001 = (Based on RedHat Enterprise Linux 4.0 Update 4)
node_fileserver_hw_disk000 = 250 GB, SATA, 7200 RPM
node_fileserver_sw_os000 = ClusterCorp Rocks 4.2.1
node_fileserver_sw_os001 = (Based on RedHat Enterprise Linux 4.0 Update 4)
node_fileserver_notes_000 = "other" purposes of this node: login, compile, job submission
node_fileserver_notes_005 = and queuing.
node_fileserver_notes_010 = This node was assembled with a 2U chassis and 700 watt ATX 12V Power Supply.
interconnect_ib_notes_000 = The data rate between InfiniPath HCAs and SilverStorm switches
interconnect_ib_notes_005 = is SDR. However, DDR is used for inter-switch links.
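# Illustration only: in the 130.socorro peak section above, RM_SOURCES drops
# the benchmark's own specblas.F90 and specbessel.c from the compile list,
# and EXTRA_LIBS links ACML in their place at the final link step. The object
# list and output name below are placeholders, and the actual command (as
# recorded in the build log) may differ in detail, but the link is roughly:
#
#   /usr/bin/mpif90 -f90=pathf90 -march=opteron -O3 -OPT:Ofast -IPA:max_jobs=4 \
#       *.o -L/net/files/tools/acml/x86_64/acml3.5.0/pathscale64/lib -lacml -o socorro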