#!/bin/tcsh
# Sample Batch Script for an MVAPICH2-Intel job
# on forge
#
#
#
# Submit this script using the command: qsub <script_name>
#
# Use the "qstat" command to check the status of a job.
#
# The following are embedded QSUB options. The syntax is #PBS (the # does
# _not_ mean the line is commented out, so do not remove it).
#
# walltime : maximum wall clock time (hh:mm:ss)
#PBS -l walltime=24:00:00
#
# nodes: number of 16-core nodes
# ppn: how many cores per node to use (1 through 16)
#      (you are always charged for the entire node)
#PBS -l nodes=1:ppn=6
#
# export all my environment variables to the job
#PBS -V
#PBS -q normal
# job name (default = name of script file)
#PBS -N vvvr
#
#
# filename for standard output (default = <job_name>.o<job_id>)
# at end of job, it is in directory from which qsub was executed
# remove extra ## from the line below if you want to name your own file
###PBS -o testjob.out
#
# filename for standard error (default = <job_name>.e<job_id>)
# at end of job, it is in directory from which qsub was executed
# remove extra ## from the line below if you want to name your own file
###PBS -e testjob.err
#
# End of embedded QSUB options
#
# set echo               # echo commands before execution; use for debugging
#

cd ${HOME}/code/yank/proof-of-concept/alchemical-repex-volume

# number of MPI ranks = number of hosts (lines) listed in the PBS node file
setenv NP `wc -l ${PBS_NODEFILE} | cut -d'/' -f1`

##setenv MV2_SRQ_SIZE 4000
### setenv any other vars you might need

### run mpi: This will place ranks in the order in which nodes appear in
### the host file; the default, $PBS_NODEFILE, lists hosts in "packed"
### order, i.e.,
### host1
### host1
### ...
### host2
### host2

#mpirun_rsh -ssh -np ${NP} -hostfile ${PBS_NODEFILE} python alchemical-repex-volume.py

cd $HOME/code/openmm/vvvr/openmm/waterbox
#rm -f *.nc

# Override number of processors.
setenv NP 6

mpirun_rsh -ssh -np ${NP} -hostfile ${PBS_NODEFILE} python vvvr_waterbox_steadystate_mpi.py
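
#
# Note: the first NP line above derives the MPI rank count by parsing the
# output of `wc -l ${PBS_NODEFILE}` and cutting the node-file path off at
# its first '/'.  A minimal alternative sketch (same intent, not part of
# the original job setup) reads the node file on stdin so wc prints only
# the count and no path parsing is needed:
#
#   setenv NP `cat ${PBS_NODEFILE} | wc -l`
#
# Either form yields one MPI rank per line of the "packed" host file
# described in the comments above.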