[Ocfs2-tools-devel] [PATCH 1/1] Ocfs2-test: Mount and umount among nodes with the remote_mount.py/remote_umount.py utilities.

Tristan Ye tristan.ye at oracle.com
Mon Sep 22 19:49:25 PDT 2008


Originally, the multi-node tests used remote ssh execution to mount and umount
the ocfs2 volume on each node. This can be simplified by using the existing
remote_mount.py and remote_umount.py utilities.
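
With this change the per-node ssh mount/umount loops collapse to a single
call on each side. Roughly (the label and options are the ones the updated
scripts pass; the mount point and host names are only illustrative):

    # mount the labeled ocfs2 volume on all test nodes at once
    sudo remote_mount.py -l oracle_home -m /mnt/ocfs2 -n node1.us.oracle.com,node2.us.oracle.com

    # umount it from the same nodes when the run finishes
    sudo remote_umount.py -m /mnt/ocfs2 -n node1.us.oracle.com,node2.us.oracle.com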

Signed-off-by: Tristan Ye <tristan.ye at oracle.com>
---
 programs/inline-data/multi-inline-run.sh |   78 ++++++++++++--------
 programs/xattr_tests/xattr-multi-run.sh  |  117 +++++++++++++++++-------------
 2 files changed, 114 insertions(+), 81 deletions(-)
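
A note on usage: both wrappers now take a comma-separated host list via -f
instead of an MPI hostfile (the hostfile is generated internally). A sample
invocation, with the device and mount point purely illustrative:

    ./multi-inline-run.sh -f node1.us.oracle.com,node2.us.oracle.com -d /dev/sdb1 /mnt/ocfs2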

diff --git a/programs/inline-data/multi-inline-run.sh b/programs/inline-data/multi-inline-run.sh
index 433d379..1188219 100755
--- a/programs/inline-data/multi-inline-run.sh
+++ b/programs/inline-data/multi-inline-run.sh
@@ -35,7 +35,9 @@ REMOTE_SH_BIN=${SSH_BIN}
 AWK_BIN="`which awk`"
 TOUCH_BIN="`which touch`"
 MOUNT_BIN="`which sudo` -u root `which mount`"
+REMOTE_MOUNT_BIN="`which sudo` -u root `which remote_mount.py`"
 UMOUNT_BIN="`which sudo` -u root `which umount`"
+REMOTE_UMOUNT_BIN="`which sudo` -u root `which remote_umount.py`"
 MKFS_BIN="`which sudo` -u root `which mkfs.ocfs2`"
 INLINE_DATA_BIN=`which multi-inline-data`
 INLINE_DIRS_BIN=`which multi-inline-dirs`
@@ -53,10 +55,10 @@ BLOCKNUMS=
 
 TMP_DIR=/tmp
 DEFAULT_HOSTFILE=".openmpi_hostfile"
-CLUSTER_CONFIG_FILE="/etc/ocfs2/cluster.conf"
 DEFAULT_RANKS=4
 
 declare -i MPI_RANKS
+MPI_HOSTS=
 MPI_HOSTFILE=
 MPI_ACCESS_METHOD="ssh"
 MPI_PLS_AGENT_ARG="-mca pls_rsh_agent ssh:rsh"
@@ -121,10 +123,10 @@ exit_or_not()
 ################################################################################
 f_usage()
 {
-    echo "usage: `basename ${0}` [-r MPI_ranks] [-f MPI_hostfile] [-a access_method] [-o output] <-d <device>> <mountpoint path>"
+    echo "usage: `basename ${0}` [-r MPI_ranks] <-f MPI_hosts> [-a access_method] [-o output] <-d <device>> <mountpoint path>"
     echo "       -r size of MPI rank"
     echo "       -a access method for process propagation,should be ssh or rsh,set ssh as a default method when omited."
-    echo "       -f MPI hostfile,by default,script would generate one by referring to /etc/ocfs2/cluster.conf"
+    echo "       -f MPI hosts list, separated by commas, e.g. -f node1.us.oracle.com,node2.us.oracle.com."
     echo "       -o output directory for the logs"
     echo "       -d device name used for ocfs2 volume"
     echo "       <mountpoint path> path of mountpoint where the ocfs2 volume will be mounted on."
@@ -143,7 +145,7 @@ f_getoptions()
                 case $options in
 		a ) MPI_ACCESS_METHOD="$OPTARG";;
 		r ) MPI_RANKS="$OPTARG";;
-		f ) MPI_HOSTFILE="$OPTARG";;
+		f ) MPI_HOSTS="$OPTARG";;
                 o ) LOG_OUT_DIR="$OPTARG";;
                 d ) OCFS2_DEVICE="$OPTARG";;
                 h ) f_usage
@@ -159,27 +161,29 @@ f_getoptions()
 
 f_create_hostfile()
 {
-        MPI_HOSTFILE="${TMP_DIR}/${DEFAULT_HOSTFILE}"
+	MPI_HOSTFILE="${TMP_DIR}/${DEFAULT_HOSTFILE}"
+        TMP_FILE="${TMP_DIR}/.tmp_openmpi_hostfile_$$"
 
-        TMP_FILE="${TMP_DIR}/.tmp_openmpi_hostfile"
+        echo ${MPI_HOSTS}|sed -e 's/,/\n/g'>$TMP_FILE
 
         if [ -f "$MPI_HOSTFILE" ];then
                 ${RM} -rf ${MPI_HOSTFILE}
         fi
 
-        cat ${CLUSTER_CONFIG_FILE}|grep name|sed '$ d'|${AWK_BIN} '{print $3}'>$TMP_FILE
-
         while read line
         do
+                if [ -z "$line" ];then
+                        continue
+                fi
+
                 echo "$line      slots=2">>$MPI_HOSTFILE
 
         done<$TMP_FILE
 
-        ${RM} -rf $TMP_FILE
 
+        ${RM} -rf $TMP_FILE
 }
 
-
 f_setup()
 {
 	rpm -q --quiet openmpi ||{
@@ -210,12 +214,10 @@ f_setup()
         DIRS_LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/multiple-inline-dirs-test-${LOG_POSTFIX}.log"
         RUN_LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/run-${LOG_POSTFIX}.log"
 
-	if [ -z "$MPI_HOSTFILE" ];then
-                f_create_hostfile
+	if [ -z "$MPI_HOSTS" ];then
+		f_usage
         else
-                if [ ! -f "${MPI_HOSTFILE}" ];then
-                        f_usage
-                fi
+		f_create_hostfile
         fi
 
 }
@@ -224,33 +226,45 @@ f_do_mkfs_and_mount()
 {
 	echo -n "Mkfsing device(-b ${BLOCKSIZE} -C ${CLUSTERSIZE}): "|tee -a ${RUN_LOG_FILE}
 
-	echo y|${MKFS_BIN} --fs-features=inline-data -b ${BLOCKSIZE} -C ${CLUSTERSIZE} -N 4 ${OCFS2_DEVICE} ${BLOCKNUMS}>>${RUN_LOG_FILE} 2>&1
+	echo y|${MKFS_BIN} --fs-features=inline-data -b ${BLOCKSIZE} -C ${CLUSTERSIZE} -N 4 -L oracle_home ${OCFS2_DEVICE} ${BLOCKNUMS}>>${RUN_LOG_FILE} 2>&1
 
 	RET=$?
 	echo_status ${RET} |tee -a ${RUN_LOG_FILE}
 	exit_or_not ${RET}
 
-	while read node_line ; do
-		host_node=`echo ${node_line}|${AWK_BIN} '{print $1}'`
-		echo -n "Mounting device to ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
-		RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/mount -t ocfs2 -o rw,nointr ${OCFS2_DEVICE} ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
-		echo_status ${RET} |tee -a ${RUN_LOG_FILE}
-		exit_or_not ${RET}
-			
-	done<${MPI_HOSTFILE}
+#	while read node_line ; do
+#		host_node=`echo ${node_line}|${AWK_BIN} '{print $1}'`
+#		echo -n "Mounting device to ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
+#		RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/mount -t ocfs2 -o rw,nointr ${OCFS2_DEVICE} ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
+#		echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+#		exit_or_not ${RET}
+#			
+#	done<${MPI_HOSTFILE}
+	echo -n "Mounting device ${OCFS2_DEVICE} to nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
+        ${REMOTE_MOUNT_BIN} -l oracle_home -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
+        RET=$?
+        echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+        exit_or_not ${RET}
+
 
 } 
 
 f_do_umount()
 {
-	while read node_line;do
-		host_node=`echo ${node_line}|awk '{print $1}'`
-		echo -ne "Unmounting device from ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
-		RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/umount ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
-		echo_status ${RET} |tee -a ${RUN_LOG_FILE}
-		exit_or_not ${RET}
-		
-	done<${MPI_HOSTFILE}
+#	while read node_line;do
+#		host_node=`echo ${node_line}|awk '{print $1}'`
+#		echo -ne "Unmounting device from ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
+#		RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/umount ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
+#		echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+#		exit_or_not ${RET}
+#		
+#	done<${MPI_HOSTFILE}
+
+	echo -n "Unmounting device ${OCFS2_DEVICE} from nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
+        ${REMOTE_UMOUNT_BIN} -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
+        RET=$?
+        echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+        exit_or_not ${RET}
 
 }
 
diff --git a/programs/xattr_tests/xattr-multi-run.sh b/programs/xattr_tests/xattr-multi-run.sh
index 92ec76c..b4f5aa6 100755
--- a/programs/xattr_tests/xattr-multi-run.sh
+++ b/programs/xattr_tests/xattr-multi-run.sh
@@ -37,7 +37,9 @@ REMOTE_SH_BIN=${SSH_BIN}
 AWK_BIN="`which awk`"
 TOUCH_BIN="`which touch`"
 MOUNT_BIN="`which sudo` -u root `which mount`"
+REMOTE_MOUNT_BIN="`which sudo` -u root `which remote_mount.py`"
 UMOUNT_BIN="`which sudo` -u root `which umount`"
+REMOTE_UMOUNT_BIN="`which sudo` -u root `which remote_umount.py`"
 MKFS_BIN="`which sudo` -u root `which mkfs.ocfs2`"
 CHMOD_BIN="`which sudo` -u root `which chmod`"
 CHOWN_BIN="`which sudo` -u root `which chown`"
@@ -59,10 +61,10 @@ WORKPLACE=
 
 TMP_DIR=/tmp
 DEFAULT_HOSTFILE=".openmpi_hostfile"
-CLUSTER_CONFIG_FILE="/etc/ocfs2/cluster.conf"
 DEFAULT_RANKS=4
 
 declare -i MPI_RANKS
+MPI_HOSTS=
 MPI_HOSTFILE=
 MPI_ACCESS_METHOD="ssh"
 MPI_PLS_AGENT_ARG="-mca pls_rsh_agent ssh:rsh"
@@ -123,10 +125,10 @@ exit_or_not()
 ################################################################################
 f_usage()
 {
-    echo "usage: `basename ${0}` [-r MPI_Ranks] [-f MPI_Hostfile] [-a access method] [-o output] <-d <device>> <mountpoint path>"
+    echo "usage: `basename ${0}` [-r MPI_Ranks] <-f MPI_Hosts> [-a access method] [-o output] <-d <device>> <mountpoint path>"
     echo "       -r size of MPI rank"
     echo "       -a access method for process propagation,should be ssh or rsh,set ssh as a default method when omited."
-    echo "       -f MPI hostfile,script would generate one by referring /etc/ocfs2/cluster.conf by default"
+    echo "       -f MPI hosts list, separated by commas, e.g. -f node1.us.oracle.com,node2.us.oracle.com."
     echo "       -o output directory for the logs"
     echo "       -d specify the device which has been formated as an ocfs2 volume."
     echo "       <mountpoint path> path of mountpoint where the ocfs2 volume will be mounted on."
@@ -143,7 +145,7 @@ f_getoptions()
 	 while getopts "o:d:r:f:a:h:" options; do
                 case $options in
 		r ) MPI_RANKS="$OPTARG";;
-                f ) MPI_HOSTFILE="$OPTARG";;
+                f ) MPI_HOSTS="$OPTARG";;
                 o ) LOG_OUT_DIR="$OPTARG";;
                 d ) OCFS2_DEVICE="$OPTARG";;
 		a ) MPI_ACCESS_METHOD="$OPTARG";;
@@ -157,28 +159,34 @@ f_getoptions()
 	MOUNT_POINT=${1}
 
 }
+
 f_create_hostfile()
 {
         MPI_HOSTFILE="${TMP_DIR}/${DEFAULT_HOSTFILE}"
+	TMP_FILE="${TMP_DIR}/.tmp_openmpi_hostfile_$$"
 
-        TMP_FILE="${TMP_DIR}/.tmp_openmpi_hostfile"
-
-	if [ -f "$MPI_HOSTFILE" ];then
-		${RM} -rf ${MPI_HOSTFILE}
-	fi
+	echo ${MPI_HOSTS}|sed -e 's/,/\n/g'>$TMP_FILE
 
-        cat ${CLUSTER_CONFIG_FILE}|grep name|sed '$ d'|${AWK_BIN} '{print $3}'>$TMP_FILE
+        if [ -f "$MPI_HOSTFILE" ];then
+                ${RM} -rf ${MPI_HOSTFILE}
+        fi
 
         while read line
         do
+		if [ -z "$line" ];then
+			continue
+		fi
+
                 echo "$line      slots=2">>$MPI_HOSTFILE
 
         done<$TMP_FILE
 
+
         ${RM} -rf $TMP_FILE
 
 }
 
+
 f_setup()
 {
 	rpm -q --quiet openmpi ||{
@@ -208,13 +216,12 @@ f_setup()
 	LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/xattr-multiple-test-log-${LOG_POSTFIX}.log"
 	RUN_LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/run-${LOG_POSTFIX}.log"
 	
-	if [ -z "$MPI_HOSTFILE" ];then
-                f_create_hostfile
-        else
-                if [ ! -f "${MPI_HOSTFILE}" ];then
-                        f_usage
-                fi
-        fi
+	if [ -z "$MPI_HOSTS" ];then
+		f_usage
+	else
+		f_create_hostfile
+	fi
+
 
 	${CHMOD_BIN} -R 777 ${MOUNT_POINT}
 
@@ -228,20 +235,25 @@ f_do_mkfs_and_mount()
 {
         echo -n "Mkfsing device(-b ${BLOCKSIZE} -C ${CLUSTERSIZE}): "|tee -a ${RUN_LOG_FILE}
 
-        echo y|${MKFS_BIN} --fs-features=xattr -b ${BLOCKSIZE} -C ${CLUSTERSIZE} -N 4 ${OCFS2_DEVICE} ${BLOCKNUMS}>>${RUN_LOG_FILE} 2>&1
+        echo y|${MKFS_BIN} --fs-features=xattr -b ${BLOCKSIZE} -C ${CLUSTERSIZE} -N 4 -L oracle_home ${OCFS2_DEVICE} ${BLOCKNUMS}>>${RUN_LOG_FILE} 2>&1
 
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
 
-        while read node_line ; do
-                host_node=`echo ${node_line}|${AWK_BIN} '{print $1}'`
-                echo -n "Mounting device to ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
-                RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/mount -t ocfs2 -o rw,nointr ${OCFS2_DEVICE} ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
-                echo_status ${RET} |tee -a ${RUN_LOG_FILE}
-                exit_or_not ${RET}
-
-        done<${MPI_HOSTFILE}
+#        while read node_line ; do
+#                host_node=`echo ${node_line}|${AWK_BIN} '{print $1}'`
+#                echo -n "Mounting device to ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
+#                RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/mount -t ocfs2 -o rw,nointr ${OCFS2_DEVICE} ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
+#                echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+#                exit_or_not ${RET}
+#
+#        done<${MPI_HOSTFILE}
+	echo -n "Mounting device ${OCFS2_DEVICE} to nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
+	${REMOTE_MOUNT_BIN} -l oracle_home -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
+	RET=$?
+	echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+        exit_or_not ${RET}
 
         ${MKDIR} -p ${WORKPLACE} || exit 1
 
@@ -249,14 +261,20 @@ f_do_mkfs_and_mount()
 
 f_do_umount()
 {
-        while read node_line;do
-                host_node=`echo ${node_line}|awk '{print $1}'`
-                echo -ne "Unmounting device from ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
-                RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/umount ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
-                echo_status ${RET} |tee -a ${RUN_LOG_FILE}
-                exit_or_not ${RET}
+#        while read node_line;do
+#                host_node=`echo ${node_line}|awk '{print $1}'`
+#                echo -ne "Unmounting device from ${MOUNT_POINT} on ${host_node}:"|tee -a ${RUN_LOG_FILE}
+#                RET=$(${REMOTE_SH_BIN} -n ${host_node} "sudo /bin/umount ${MOUNT_POINT};echo \$?" 2>>${RUN_LOG_FILE})
+#                echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+#                exit_or_not ${RET}
+#
+#        done<${MPI_HOSTFILE}
 
-        done<${MPI_HOSTFILE}
+	echo -n "Unmounting device ${OCFS2_DEVICE} from nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
+	${REMOTE_UMOUNT_BIN} -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
+	RET=$?
+	echo_status ${RET} |tee -a ${RUN_LOG_FILE}
+	exit_or_not ${RET}
 
 }
 
@@ -272,10 +290,10 @@ f_runtest()
 	do
 		for filetype in normal directory symlink
 		do
-			echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}">>${LOG_FILE}
+			echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}">>${LOG_FILE}
 			echo "********${namespace} mode on ${filetype}********">>${LOG_FILE}
 
-			${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
+			${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
 			rc=$?
 			if [ "$rc" != "0" ];then
 				if [ "$namespace" == "user" -a "$filetype" == "symlink" ]; then
@@ -313,8 +331,8 @@ f_runtest()
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
 	for((i=0;i<4;i++));do
-		echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}">>${LOG_FILE}
-		${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}>>${LOG_FILE} 2>&1
+		echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}">>${LOG_FILE}
+		${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}>>${LOG_FILE} 2>&1
 		rc=$?
 		if [ ! "$rc" == "0"  ];then
 			echo_failure |tee -a ${RUN_LOG_FILE}
@@ -335,8 +353,8 @@ f_runtest()
 	echo -ne "Check Max Multinode Xattr EA_Name_Length:">> ${LOG_FILE}
 	echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-	echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}">>${LOG_FILE}
-	${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}>>${LOG_FILE} 2>&1
+	echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}">>${LOG_FILE}
+	${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}>>${LOG_FILE} 2>&1
 	RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -349,8 +367,8 @@ f_runtest()
         echo -ne "Check Max Multinode Xattr EA_Size:">> ${LOG_FILE}
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}">>${LOG_FILE}
-        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
+        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}">>${LOG_FILE}
+        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -363,8 +381,8 @@ f_runtest()
         echo -ne "Check Huge Multinode Xattr EA_Entry_Nums:">> ${LOG_FILE}
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}">>${LOG_FILE}
-        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
+        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}">>${LOG_FILE}
+        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -377,8 +395,8 @@ f_runtest()
         echo -ne "Check All Max Multinode Xattr Arguments Together:">> ${LOG_FILE}
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}">>${LOG_FILE}
-        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
+        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}">>${LOG_FILE}
+        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -391,8 +409,8 @@ f_runtest()
         echo -ne "Launch Concurrent Adding Test:">> ${LOG_FILE}
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}">>${LOG_FILE}
-        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
+        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}">>${LOG_FILE}
+        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -405,8 +423,8 @@ f_runtest()
         echo -ne "Launch MultiNode Xattr Stress Test:">> ${LOG_FILE}
         echo >>${LOG_FILE}
         echo "==========================================================">>${LOG_FILE}
-        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000  -r -k ${WORKPLACE}">>${LOG_FILE}
-        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --hostfile ${MPI_HOSTFILE} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000  -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
+        echo -e "Testing Binary:\t\t${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000  -r -k ${WORKPLACE}">>${LOG_FILE}
+        ${MPIRUN_BIN} ${MPI_PLS_AGENT_ARG} -mca btl tcp,self -mca btl_tcp_if_include eth0 -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000  -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
         RET=$?
         echo_status ${RET} |tee -a ${RUN_LOG_FILE}
         exit_or_not ${RET}
@@ -420,6 +438,7 @@ f_cleanup()
 	if [ -f "$TMP_FILE" ];then
                 ${RM} -rf $TMP_FILE
         fi
+
 }
 
 ################################################################################
-- 
1.5.5