# Invocation command line:
# /data/benchmarks/SPEChpc2002/bin/runspec -c env.cfg --action validate --strict --reportable -e mpi env_m
############################################################################
# Invocation command line:
# /data/benchmarks/SPEChpc2002/bin/runspec -c em-gm-pgi.cfg --action validate --strict --reportable -e mpi env_m
############################################################################
#
action = validate
teeout = yes
output_format = asc
license_num = HPG0005
company_name = IBM Corporation
machine_name = IBM eServer 1350 Cluster
hw_vendor = IBM Corporation
hw_model = eServer 325 Cluster
hw_cpu = AMD Opteron (246)
hw_cpu_mhz = 2000
hw_fpu = Integrated
hw_ncpuorder = 1,2 per node, unlimited nodes
hw_pcache = 64KBI + 64KBD on chip
hw_scache = 1024KB(I+D) on chip
hw_tcache = None
hw_ocache = None
hw_parallel = MPI
hw_memory = 4GB DDR333 CL2.5 Registered ECC
hw_disk = 1 x 38GB SCSI per node
hw_other000 = Myrinet M3F-PCIXD-2 Adapters (one per node)
hw_other001 = Myrinet M3-E32 Switch Enclosure
hw_other002 = Myrinet M3-SW16-8F 8-Port Line Card (4)
hw_other003 = x345 File Server
hw_other004 = Cisco 4003 GbE Switch
hw_other005 = ServeRAID 4H SCSI RAID Adapter
hw_other006 = SCSI Drives (10)
hw_avail = Oct-2003
sw_os000 = SuSE Linux 8.0 SLES 64 bit Kernel
sw_os001 = k_smp-2.4.19-249 (from Service Pack 2)
sw_file = Linux/reiserfs
sw_state = Multi-user
sw_avail = Jul-2003
tester_name = IBM Corporation
test_site = Research Triangle Park, NC
test_date = Oct-2003
sw_compiler000 = Fortran: Portland Group 5.0-2 Fortran 90
sw_compiler001 = C: Portland Group 5.0-2 C
sw_other000 = MPICH 1.2.5.10
sw_other001 = GM 2.0.6_Linux
# sw_other002 = AMD Core Math Library (ACML)
hw_ncpu = 48
sw_parallel = MPI
sw_procs_thrds = 48
sw_ranks = 48
sw_threads = N/A
prepared_by = Douglas Pase

GCCDIR = /opt/gcc33/bin
PGIDIR = /usr/pgi/linux86-64/5.0/bin

CC = $(PGIDIR)/pgcc
CXX = $(PGIDIR)/pgCC
FC = $(PGIDIR)/pgf90
F77 = $(PGIDIR)/pgf90
CPP = $(PGIDIR)/pgf90

EXTRA_LDFLAGS =
env_vars = yes

HPC_HOME = /data/benchmarks/SPEChpc2002
HPC_CONFIG = $HPC_HOME/config
P4PG_HOME = $HPC_CONFIG/p4pg
ENVR_HOME = $HPC_CONFIG/env
MF_HOME = $HPC_CONFIG/mf
BENCH_HOME = $HPC_HOME/benchspec/HPC2002
PGI_HOME = /usr/pgi/linux86-64/5.0
GM_HOME = /data/gm-2.0.6_Linux
CDF_HOME = /data/netcdf-3.5.1-beta13
CDF_INC = $(CDF_HOME)/include
CDF_LIB = $(CDF_HOME)/lib
MPI_HOME = /data/mpichgm-1.2.5.10a
MPI_INC = $(MPI_HOME)/include
MPI_LIB = $(MPI_HOME)/lib
MPI_BIN = $MPI_HOME/bin
MPI_RUN = $MPI_BIN/mpirun.ch_gm
PGI_LIB = $(PGI_HOME)/lib
GCC_LIB = /usr/lib64/gcc-lib/x86_64-suse-linux/3.2.2
L64_LIB = /usr/lib64
GM_LIB = $(GM_HOME)/lib
GMB_LIB = $(GM_HOME)/binary/lib

CDFLIBS = -L$(CDF_LIB) -lnetcdf
MPILIBS = -L$(GMB_LIB) -L$(GM_LIB) -L$(MPI_LIB) \
          -L$(PGI_LIB) -L$(L64_LIB) -L$(GCC_LIB) \
          -lmpich -lgm -lpthread

GM_NP = -np $hw_ncpu
GM_MF = -machinefile $MF_HOME/n00x2.mf
GM_RECV = --gm-recv blocking
GM_SHMEM = --gm-no-shmem

#
# env:
# ENV_SPEC_HPG_PARALLEL={MPI,MPI_OMP}
#
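#
# Machinefile sketch: GM_MF points mpirun.ch_gm at $MF_HOME/n00x2.mf. With
# MPICH-GM, a machinefile typically lists one hostname per line, and
# repeating a hostname places that many ranks on the node. The hostnames
# below are hypothetical (not taken from the tested cluster); a 48-rank run
# on two-CPU nodes would list 24 nodes twice each, for example:
#
#   node01
#   node01
#   node02
#   node02
#   ...
#   node24
#   node24
#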
###########################################################################
#
# SERIAL
#
###########################################################################

360.wrf_s=default=serial=default:
CPORTABILITY =
FPORTABILITY =
CPPPORTABILITY = -E
COPTIMIZE = -O3 -fast
FOPTIMIZE = -O3 -fast
EXTRA_CFLAGS = -I. -I$(CDF_INC)
EXTRA_FFLAGS = -I. -I$(CDF_INC)
EXTRA_CPPFLAGS = -I. -I$(CDF_INC)
EXTRA_LDFLAGS = $(CDFLIBS)

361.wrf_m=default=serial=default:
CPORTABILITY =
FPORTABILITY =
CPPPORTABILITY = -E
COPTIMIZE = -O3 -fast
FOPTIMIZE = -O3 -fast
EXTRA_CFLAGS = -I. -I$(CDF_INC)
EXTRA_FFLAGS = -I. -I$(CDF_INC)
EXTRA_CPPFLAGS = -I. -I$(CDF_INC)
EXTRA_LDFLAGS = $(CDFLIBS)

###########################################################################
#
# MPI
#
###########################################################################

360.wrf_s=default=mpi=default:
CPORTABILITY =
FPORTABILITY =
CPPPORTABILITY = -E
COPTIMIZE = -O3 -fast
FOPTIMIZE = -O3 -fast
EXTRA_CFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_FFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_CPPFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_LDFLAGS =
EXTRA_LIBS = $(CDFLIBS) $(MPILIBS)
use_submit_for_speed = yes
ENV_SPEC_HPG_PARALLEL = MPI
BENCH_NAME = 360.wrf_s
HPC_CP = cp -rf $ENVR_HOME/$BENCH_NAME.env ~/.ssh/environment
HPC_RM = rm -rf ~/.ssh/environment
MPIRUN = $MPI_RUN $GM_NP $GM_MF $GM_RECV $GM_SHMEM -wd `pwd`
submit = $HPC_CP ; $MPIRUN $command ; $HPC_RM
notes000 = Flags:
notes005 = Fortran:
notes010 = -O3 -fast
notes015 = C:
notes020 = -O3 -fast
notes025 = Preprocessor:
notes030 = -E
notes035 =
notes040 = Alternate Source:
notes045 = none.
notes050 =
notes055 = Submit command to run applications:
notes060 = mpirun.ch_gm -np NUMPROCS -machinefile n00x2.mf
notes065 = --gm-recv blocking --gm-no-shmem $command
notes070 =
notes075 = Set the following BIOS parameters:
notes080 = DRAM Interleave = AUTO
notes085 = Node Interleave = Disabled
notes090 = ACPI SRAT = Enabled

361.wrf_m=default=mpi=default:
CPORTABILITY =
FPORTABILITY =
CPPPORTABILITY = -E
COPTIMIZE = -O3 -fast
FOPTIMIZE = -O3 -fast
EXTRA_CFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_FFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_CPPFLAGS = -I. -I$(CDF_INC) -I$(MPI_INC)
EXTRA_LDFLAGS =
EXTRA_LIBS = $(CDFLIBS) $(MPILIBS)
use_submit_for_speed = yes
ENV_SPEC_HPG_PARALLEL = MPI
BENCH_NAME = 361.wrf_m
HPC_CP = cp -rf $ENVR_HOME/$BENCH_NAME.env ~/.ssh/environment
HPC_RM = rm -rf ~/.ssh/environment
MPIRUN = $MPI_RUN $GM_NP $GM_MF $GM_RECV $GM_SHMEM -wd `pwd`
submit = $HPC_CP ; $MPIRUN $command ; $HPC_RM
notes005 = Flags:
notes010 = Fortran:
notes015 = -O3 -fast
notes020 = C:
notes025 = -O3 -fast
notes030 = Preprocessor:
notes035 = -E
# notes040 =
# notes045 = Alternate Source:
# notes050 = none.
notes055 =
notes060 = Submit command to run applications:
notes065 = mpirun.ch_gm -np NUMPROCS -machinefile n00x2.mf --gm-recv
notes070 = blocking -wd `pwd` --gm-no-shmem $command
notes075 =
notes080 = Set the following BIOS parameters:
notes085 = DRAM Interleave = AUTO
notes090 = Node Interleave = Disabled
notes095 = ACPI SRAT = Enabled
notes100 =
notes105 = Cluster Configuration:
notes110 = Two CPUs per node
notes115 = All benchmark files are on a shared file server
notes120 = Nodes and file server use NFS shared file system
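#
# Per-benchmark environment files: the submit command copies
# $ENVR_HOME/$BENCH_NAME.env to ~/.ssh/environment before the run and
# removes it afterward, so that ssh-started remote ranks pick up the
# variables (this assumes sshd on the nodes is configured to read user
# environment files). A minimal sketch of such a file, based on the
# ENV_SPEC_HPG_PARALLEL setting above (the actual file contents are not
# shown in this config):
#
#   ENV_SPEC_HPG_PARALLEL=MPI
#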