[Pvfs2-cvs] commit by kunkel in pvfs2/test/automated/mpiio-tests.d: stadler-file-view-test.std stadler-file-view-test.cpp ior-mpiio-3 ior-mpiio-2 stadler-file-view-test

CVS commit program cvs at parl.clemson.edu
Sat Feb 17 06:17:18 EST 2007


Update of /projects/cvsroot/pvfs2/test/automated/mpiio-tests.d
In directory parlweb1:/tmp/cvs-serv2872/test/automated/mpiio-tests.d

Added Files:
      Tag: kunkel-migration-branch
	stadler-file-view-test.std stadler-file-view-test.cpp 
	ior-mpiio-3 ior-mpiio-2 stadler-file-view-test 
Log Message:
Update migration branch to current CVS version


--- /dev/null	2004-06-24 14:04:38.000000000 -0400
+++ stadler-file-view-test.std	2007-02-17 06:17:18.000000000 -0500
@@ -0,0 +1,3 @@
+12385 12386 12387 12388 12389 12390 12391 12392 12393 12394 12395 12396 12397 12398 12399 12400 12353 12642 12643 12644 12609 12646 12647 12648 12865 12650 12651 12652 13121 12654 12655 12656 12897 12898 12899 12900 12901 12902 12903 12904 12905 12906 12907 12908 12909 12910 12911 12912 13153 13154 13155 13156 13157 13158 13159 13160 13161 13162 13163 13164 13165 13166 13167 13168 
+size=4, chunk_size=16, subchunk_size=4
+file_size=128

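For reference, the numbers in stadler-file-view-test.std are the decimal values of the shorts the test writes: the high byte holds '0'+rank and the low byte a letter, lowercase from the initial contiguous write and uppercase from the later one-element write through the file view. A small standalone C++ sketch (not part of the commit) that decodes a few of them:

    /* decode a few expected values from stadler-file-view-test.std */
    #include <iostream>

    int main()
    {
        /* start of chunk 0 (rank 0's lowercase data) and the four uppercase
           shorts that chunk 1 should contain after the one-element writes
           through the file view */
        short expected[] = { 12385, 12386, 12353, 12609, 12865, 13121 };
        int n = sizeof(expected) / sizeof(expected[0]);
        for (int i = 0; i < n; ++i)
            std::cout << expected[i] << " = '" << char(expected[i] >> 8)
                      << "' '" << char(expected[i] & 0xff) << "'" << std::endl;
        return 0;
    }

This prints, for example, 12385 = '0' 'a' and 13121 = '3' 'A'.
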
--- /dev/null	2004-06-24 14:04:38.000000000 -0400
+++ stadler-file-view-test.cpp	2007-02-17 06:17:18.000000000 -0500
@@ -0,0 +1,94 @@
+/*
+From: Stadler Hans-Christian <hans-christian.stadler at psi.ch>
+To: mpich2-maint at mcs.anl.gov
+Subject: [MPICH2 Req #2077] Wrong Transfer Size in MPI::File::Write_at (mpich2-1.0.2p1)
+Date: Tue, 8 Nov 2005 14:42:38 +0100
+
+In MPI-2 July 18, 1997
+
+1) on page 225: The offset is always in etype units relative to the current
+view.  
+2) on page 226: A data access routine attempts to transfer (read or write)
+count data items of type datatype.
+
+In the test below, the file consists of 3 consecutive instances of an array of
+12 shorts. The fileview of each node consists of subarrays of 4 shorts, one
+subarray for each consecutive array instance.  Each of the 3 nodes tries to
+write one element (of datatype short) to the first element (of etype short) of
+its own subarray within the second consecutive array instance.
+
+Erroneous behaviour: instead, each node writes 4 elements (of datatype short)
+to its own subarray.
+
+The requirement (2) is violated in this case. The offset is handled correctly
+as specified in (1).
+
+Hans-Christian Stadler
+
+*/
+
+#include <iostream>
+
+#undef SEEK_CUR
+#undef SEEK_SET
+#undef SEEK_END
+#include <mpi.h>
+
+using namespace std;
+
+int main (int argc, char *argv[])
+{
+        MPI::Intracomm &world = MPI::COMM_WORLD;
+        MPI::File fh;
+        MPI::Datatype &base = MPI::SHORT;
+        MPI::Datatype view;
+        int size, rank;
+        int chunk_size, subchunk_size, subchunk_offset;
+        short *buf;
+	short *reference;
+        int i;
+
+        MPI::Init(argc, argv);
+        size = world.Get_size();
+        rank = world.Get_rank();
+        subchunk_size = 4;
+        chunk_size = size * subchunk_size;
+        subchunk_offset = subchunk_size * rank;
+        buf = new short[chunk_size*size]; /* rank 0 re-uses buf to read the whole file below */
+        for (i=0; i<chunk_size; ++i)
+                buf[i] = ('0'+rank) << 8 | ('a'+i);
+
+        try {
+                view = base.Create_subarray(1, &chunk_size, &subchunk_size, 
+				&subchunk_offset, MPI::ORDER_C);
+                view.Commit();
+                fh = MPI::File::Open(world, argv[1],
+				MPI::MODE_RDWR|MPI::MODE_CREATE, 
+				MPI::INFO_NULL);
+                fh.Write_at(chunk_size*rank*sizeof(short), buf, 
+				chunk_size, base);
+                fh.Set_view(0, base, view, "native", MPI::INFO_NULL);
+                for (i=0; i<chunk_size; ++i)
+                        buf[i] = ('0'+rank) << 8 | ('A'+i);
+                fh.Write_at(subchunk_size, buf, 1, base);
+                fh.Close();
+        } catch (MPI::Exception &ex) {
+                cerr << "MPI Exception: " << ex.Get_error_string() << endl;
+                world.Abort(-1);
+        }
+
+        if (! rank) {
+		fh = MPI::File::Open(MPI::COMM_SELF, argv[1], 
+				MPI::MODE_RDONLY, MPI::INFO_NULL);
+		fh.Read(buf, chunk_size*size, MPI::SHORT);
+		fh.Close();
+		for (int j=0; j<chunk_size*size; j++)
+			cout << buf[j] << " ";
+		cout << endl;
+
+                cout << "size=" << size << ", chunk_size=" << chunk_size << ", subchunk_size=" << subchunk_size << endl;
+                cout << "file_size=" << chunk_size*size*sizeof(short) << endl;
+        }
+        MPI::Finalize();
+        return 0;
+}

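To make the expected behaviour concrete: after Set_view(), the offset passed to Write_at() is counted in etypes visible through the view (MPI-2, p. 225), so an offset of subchunk_size skips a rank's whole subarray in the first chunk and lands on the first element of that rank's subarray in the second chunk, where exactly count=1 short should be transferred (p. 226). A standalone sketch, assuming the same sizes as the reference output above (4 ranks; not part of the committed test), that computes where each rank's single write should land:

    /* where should rank r's Write_at(subchunk_size, buf, 1, short) land? */
    #include <iostream>

    int main()
    {
        const int size = 4;                      /* ranks, as in the .std output  */
        const int subchunk_size = 4;             /* etypes per rank per chunk     */
        const int chunk_size = size * subchunk_size;
        const int view_offset = subchunk_size;   /* offset passed to Write_at()   */

        for (int rank = 0; rank < size; ++rank) {
            int repetition = view_offset / subchunk_size;  /* 1 -> second chunk   */
            int within     = view_offset % subchunk_size;  /* 0 -> first etype    */
            int file_etype = repetition * chunk_size + rank * subchunk_size + within;
            std::cout << "rank " << rank << ": 1 short at etype " << file_etype
                      << " (byte " << file_etype * (int)sizeof(short) << ")"
                      << std::endl;
        }
        return 0;
    }

Those four positions (etypes 16, 20, 24 and 28) are exactly where the reference output shows the uppercase values 12353, 12609, 12865 and 13121, whereas the report above says the faulty mpich2-1.0.2p1 overwrote each rank's whole 4-short subarray.
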
--- /dev/null	2004-06-24 14:04:38.000000000 -0400
+++ ior-mpiio-3	2007-02-17 06:17:19.000000000 -0500
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+. functions
+
+##
+## entry point for script
+##
+
+IOR=${CLUSTER_DIR}/IOR
+
+(cd ${EXTRA_TESTS}/IOR-2.8.6 && make mpiio && cp src/C/IOR ${IOR} )
+
+if [ $? -ne 0 ] ; then
+	exit 1
+fi
+
+# like the other mpiio tests we can only do multi processor tests if there's a
+# pav config file we can use
+# If we have to we can fall back to single processor, and still do something
+# reasonable. 
+
+if [ -f $PAV_CONFIG ] ; then 
+	# write out a pbs script
+	pbs_script=${CLUSTER_DIR}/ior3.sh
+	# -F: one file per process
+	# the '-e' option (fsync after write) is not available with MPIIO
+	make_pbs_script ${IOR} -a MPIIO -C -i 3 -F -o pvfs2:\${MOUNTPOINT}/iortest > $pbs_script
+
+	# submit it
+	job_id=$(qsub -N ior3 $pbs_script | cut -d . -f1)
+
+	# wait patiently for it to complete
+	block_until_done $job_id
+
+	# need to get results into per-test log files
+	cat ior3.o$job_id
+
+	# need to know if we failed or not
+	egrep -q '(Abort:|Assertion.*failed|Actual file size)' ior3.o$job_id
+	if [ $? -eq 0 ] ; then
+		exit 1
+	fi
+	grep -q 'Max Read' ior3.o$job_id
+fi

--- /dev/null	2004-06-24 14:04:38.000000000 -0400
+++ ior-mpiio-2	2007-02-17 06:17:19.000000000 -0500
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+. functions
+
+##
+## entry point for script
+##
+
+IOR=${CLUSTER_DIR}/IOR
+
+(cd ${EXTRA_TESTS}/IOR-2.8.6 && make mpiio && cp src/C/IOR ${IOR} )
+
+if [ $? -ne 0 ] ; then
+	exit 1
+fi
+
+# like the other mpiio tests we can only do multi processor tests if there's a
+# pav config file we can use
+# If we have to we can fall back to single processor, and still do something
+# reasonable. 
+
+if [ -f $PAV_CONFIG ] ; then 
+	# write out a pbs script
+	pbs_script=${CLUSTER_DIR}/ior2.sh
+	# the '-e' option (fsync after write) is not available with MPIIO
+	make_pbs_script ${IOR} -a MPIIO -C -i 3 -o pvfs2:\${MOUNTPOINT}/iortest > $pbs_script
+
+	# submit it
+	job_id=$(qsub -N ior2 $pbs_script | cut -d . -f1)
+
+	# wait patiently for it to complete
+	block_until_done $job_id
+
+	# need to get results into per-test log files
+	cat ior2.o$job_id
+
+	# need to know if we failed or not
+	egrep -q '(Abort:|Assertion.*failed|Actual file size)' ior2.o$job_id
+	if [ $? -eq 0 ] ; then
+		exit 1
+	fi
+	grep -q 'Max Read' ior2.o$job_id
+fi

--- /dev/null	2004-06-24 14:04:38.000000000 -0400
+++ stadler-file-view-test	2007-02-17 06:17:19.000000000 -0500
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+. functions
+
+##
+## entry point for script
+##
+
+FILE_VIEW_TEST=${CLUSTER_DIR}/file_view_test
+
+${CLUSTER_DIR}/mpich2/bin/mpicxx ${PVFS2_DEST}/pvfs2-${CVS_TAG}/test/automated/mpiio-tests.d/stadler-file-view-test.cpp -o $FILE_VIEW_TEST
+
+if [ $? -ne 0 ] ; then
+	exit 1
+fi
+
+# like the other mpiio tests we can only do multi processor tests if there's a
+# pav config file we can use
+# If we have to we can fall back to single processor, and still do something
+# reasonable. 
+
+if [ -f $PAV_CONFIG ] ; then 
+	# write out a pbs script
+	pbs_script=${CLUSTER_DIR}/fileviewtest.sh
+	make_pbs_script ${FILE_VIEW_TEST} pvfs2:\${MOUNTPOINT}/fview-1 > $pbs_script
+
+	# submit it
+	job_id=$(qsub -N fileview $pbs_script | cut -d . -f1)
+
+	# wait patiently for it to complete
+	block_until_done $job_id
+
+	# need to get results into per-test log files
+	cat fileview.o$job_id
+
+	# need to know if we failed or not
+	grep -q "`cat ${PVFS2_DEST}/pvfs2-${CVS_TAG}/test/automated/mpiio-tests.d/stadler-file-view-test.std`" fileview.o$job_id 
+fi


