debian/0000775000000000000000000000000012253224765007200 5ustar debian/rules0000775000000000000000000000736511743022157010265 0ustar #!/usr/bin/make -f #-*- makefile -*- # Made with the aid of dh_make, by Craig Small # Sample debian/rules that uses debhelper. GNU copyright 1997 by Joey Hess. # Also some stuff taken from debmake scripts, by Christoph Lameter. # Uncomment this to turn on verbose mode. #export DH_VERBOSE=1 # This is the debhelper compatibility version to use. #export DH_COMPAT=1 # OpenMPI is only supported on a subset of architectures at this time. # This list should remain synced with the Build-Depends and the Architecture # line of netpipe-openmpi in debian/control. OPENMPI_ARCH = alpha amd64 armel armhf i386 ia64 powerpc powerpcspe sparc sparc64 kfreebsd-i386 kfreebsd-amd64 hurd-i386 DEB_HOST_ARCH ?= $(shell dpkg-architecture -qDEB_HOST_ARCH) build: build-arch build-indep build-arch: build-stamp build-indep: build-stamp build-stamp: dh_testdir $(MAKE) tcp cp debian/netpipe.1 NPtcp.1 $(MAKE) pvm cp debian/netpipe.1 NPpvm.1 $(MAKE) mpi MPICC=mpicc.lam mv NPmpi NPlam cp debian/netpipe.1 NPlam.1 ifneq (,$(findstring $(DEB_HOST_ARCH),$(OPENMPI_ARCH))) $(MAKE) mpi MPICC=mpicc.openmpi mv NPmpi NPopenmpi cp debian/netpipe.1 NPopenmpi.1 endif # $(MAKE) mpi MPICC=mpicc.mpich # mv NPmpi NPmpich # cp debian/netpipe.1 NPmpich.1 $(MAKE) mpi MPICC=mpicc.mpich2 mv NPmpi NPmpich2 cp debian/netpipe.1 NPmpich2.1 # MPI2_INC not necessary, but must not be empty $(MAKE) mpi2 MPI2CC=mpicc.lam MPI2_INC=./ mv NPmpi2 NPlam2 cp debian/netpipe.1 NPlam2.1 ifneq (,$(findstring $(DEB_HOST_ARCH),$(OPENMPI_ARCH))) $(MAKE) mpi2 MPI2CC=mpicc.openmpi MPI2_INC=./ mv NPmpi2 NPopenmpi2 cp debian/netpipe.1 NPopenmpi2.1 endif touch build-stamp clean: dh_testdir dh_testroot rm -f build-stamp install-stamp $(MAKE) clean for i in tcp lam lam2 openmpi openmpi2 mpich mpich2 pvm; do rm -f NP$${i}*; done # rm -f NPtcp.1 NPlam.1 NPopenmpi.1 NPmpich.1 NPmpich2.1 NPopenmpi2.1 NPpvm.1 # rm -f NPtcp NPopenmpi NPmpich NPopenmpi2 NPlam2 NPpvm NPmpich2 NPlam dh_clean install: install-stamp install-stamp: build-stamp dh_testdir dh_testroot dh_prep dh_installdirs mkdir -p debian/tmp/usr/bin cp NPtcp NPlam NPlam2 NPmpich2 NPpvm `pwd`/debian/tmp/usr/bin # NPmpich ifneq (,$(findstring $(DEB_HOST_ARCH),$(OPENMPI_ARCH))) cp NPopenmpi NPopenmpi2 `pwd`/debian/tmp/usr/bin endif dh_movefiles touch install-stamp # Build architecture-independent files here. binary-indep: build install # dh_testversion dh_testdir -i dh_testroot -i dh_installdocs -i dh_installexamples -i dh_installmenu -i # dh_installemacsen -i # dh_installpam -i # dh_installinit -i dh_installcron -i # dh_installmanpages -i dh_installinfo -i # dh_undocumented dh_installchangelogs -i dh_link -i dh_compress -i dh_fixperms -i dh_installdeb -i # dh_perl -i dh_gencontrol -i dh_md5sums -i dh_builddeb -i # Build architecture-dependent files here.
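# Note on the OPENMPI_ARCH guard used by the build-stamp and install-stamp
# targets above: GNU make's $(findstring) performs a plain substring search,
# so ifneq (,$(findstring $(DEB_HOST_ARCH),$(OPENMPI_ARCH))) succeeds
# whenever the host architecture appears anywhere in the OPENMPI_ARCH list.
# A partial match such as "i386" inside "kfreebsd-i386" is harmless here,
# because every such substring is itself a listed architecture. A
# hypothetical helper target for inspecting the guard by hand (an
# illustrative sketch only, not part of the original rules):
#
# print-openmpi-arch:
#	@echo "DEB_HOST_ARCH=$(DEB_HOST_ARCH)"
#	@echo "match=$(findstring $(DEB_HOST_ARCH),$(OPENMPI_ARCH))"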
binary-arch: build install # dh_testversion dh_testdir -a dh_testroot -a dh_installdocs -a dh_installexamples -a dh_installmenu -a # dh_installemacsen -a # dh_installpam -a # dh_installinit -a dh_installcron -a dh_installman -a # dh_installmanpages -pnetpipe-openmpi NPtcp.1 NPmpich.1 NPpvm.1 netpipe.1 # dh_installmanpages -pnetpipe-pvm NPtcp.1 NPmpich.1 NPopenmpi.1 NPopenmpi2.1 netpipe.1 # dh_installmanpages -pnetpipe-tcp NPmpich.1 NPopenmpi.1 NPopenmpi2.1 NPpvm.1 netpipe.1 # dh_installmanpages -pnetpipe-mpich NPtcp.1 NPopenmpi.1 NPopenmpi2.1 NPpvm.1 netpipe.1 dh_installinfo -a # dh_undocumented dh_installchangelogs -a dh_strip -a dh_link -a dh_compress -a dh_fixperms -a dh_installdeb -a # dh_makeshlibs -a # dh_perl -a dh_shlibdeps -a dh_gencontrol -a dh_md5sums -a dh_builddeb -a source diff: @echo >&2 'source and diff are obsolete - use dpkg-source -b'; false binary: binary-arch .PHONY: build clean binary-indep binary-arch binary install debian/watch0000664000000000000000000000043711741362170010227 0ustar # Example watch control file for uscan # Rename this file to "watch" and then you can run the "uscan" command # to check for upstream updates and more. # Site Directory Pattern Version Script version=3 http://bitspjoule.org/netpipe/code/NetPIPE[_-]([0-9\.]*)\.tar\.gz debian uupdate debian/manpage_old0000664000000000000000000002135111741362170011365 0ustar .\" -*- nroff -*- .\" .\" NetPIPE -- Network Protocol Independent Performance Evaluator. .\" Copyright 1997, 1998 Iowa State University Research Foundation, Inc. .\" .\" This program is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation. You should have received a copy of the .\" GNU General Public License along with this program; if not, write to the .\" Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. .\" .\" netpipe.1 .\" Created: Mon Jun 15 1998 by Guy Helmer .\" .\" $Id: netpipe.1,v 1.3 1998/09/24 16:23:59 ghelmer Exp $ .\" .TH netpipe 1 "June 15, 1998" "NetPIPE" "netpipe" .SH NAME NetPIPE \- network protocol independent performance evaluator .SH SYNOPSIS .B NPtcp [\c .BI \-A \ buffer_alignment\fR\c ] [\c .BR \-a \c ] [\c .BI \-b \ TCP_buffer_size\fR\c ] [\c .BI \-h \ host_name\fR\c ] [\c .BI \-i \ increment\fR\c ] [\c .BI \-l \ starting_msg_size\fR\c ] [\c .BI \-O \ buffer_offset\fR\c ] [\c .BI \-o \ output_filename\fR\c ] [\c .BR \-P \c ] [\c .BI \-p \ port\fR\c ] [\c .BR \-r \c ] [\c .BR \-s \c ] [\c .BR \-t \c ] [\c .BI \-u \ ending_msg_size\fR\c ] .PP .B NPmpi [\c .BI \-A \ buffer_alignment\fR\c ] [\c .BR \-a \c ] [\c .BI \-i \ increment\fR\c ] [\c .BI \-l \ starting_msg_size\fR\c ] [\c .BI \-O \ buffer_offset\fR\c ] [\c .BI \-o \ output_filename\fR\c ] [\c .BR \-P \c ] [\c .BR \-s \c ] [\c .BI \-u \ ending_msg_size\fR\c ] .PP .B NPpvm [\c .BI \-A \ buffer_alignment\fR\c ] [\c .BR \-a \c ] [\c .BI \-i \ increment\fR\c ] [\c .BI \-l \ starting_msg_size\fR\c ] [\c .BI \-O \ buffer_offset\fR\c ] [\c .BI \-o \ output_filename\fR\c ] [\c .BR \-P \c ] [\c .BR \-r \c ] [\c .BR \-s \c ] [\c .BR \-t \c ] [\c .BI \-u \ ending_msg_size\fR\c ] .SH DESCRIPTION .PP .B NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, .B NetPIPE clearly shows the overhead associated with different protocol layers. 
.B NetPIPE answers such questions as: .RS How soon will a given data block of size k arrive at its destination? .PP Which network and protocol will transmit size k blocks the fastest? .PP What is a given network's effective maximum throughput and saturation level? .PP Does there exist a block size k for which the throughput is maximized? .PP How much communication overhead is due to the network communication protocol layer(s)? .PP How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? .RE .PP .B NetPIPE is provided with interfaces for TCP, MPI, and PVM, but TCP is the most commonly used interface for general network testing purposes. It should be easy to write new interfaces for other reliable protocols based on the examples provided by the TCP, MPI and PVM interfaces. .SH TESTING TCP .PP Typical use for TCP involves running the TCP NetPIPE receiver on one system with the command .PP .Ex NPtcp \-r .Ee .PP and running the TCP NetPIPE transmitter on another system with the command .PP .Ex NPtcp \-h receiver_hostname \-o output_filename \-P \-t .Ee .PP If any options are used that modify the test protocol, including \-i, \-l, \-p, \-s, and \-u, those parameters .B must be used on both the transmitter and the receiver, or the test will not run properly. .SH TESTING PVM .PP Typical use for PVM first requires starting PVM with the command .PP .Ex pvm .Ee .PP and adding a second machine with the PVM command .PP .Ex add othermachine .Ee .PP (then exit the PVM command line interface). Then run the PVM NetPIPE receiver on one system with the command .PP .Ex NPpvm \-r .Ee .PP and run the PVM NetPIPE transmitter on the other system with the command .PP .Ex NPpvm \-t \-o output_filename \-P .Ee .PP If any options are used that modify the test protocol, including \-i, \-l, \-p, \-s, and \-u, those parameters .B must be used on both the transmitter and the receiver, or the test will not run properly. .SH TESTING MPI .PP Use of the MPI interface for NetPIPE depends on the MPI implementation used. For the Argonne MPICH implementation using the p4 device (for a cluster of individual systems interconnected using TCP/IP), create a file that contains the hostnames of the two systems you want to include in the test, with one hostname on each line of the file (assume the file is named "machines.p4"). Then, use the command .PP .Ex mpirun \-machinefile machines.p4 \-np 2 NPmpi \-o output_filename \-P .Ee .PP to start the test. MPICH will start an NPmpi process on each of the two selected machines and the test will begin. .SH TESTING METHODOLOGY .PP .B NetPIPE tests network performance by sending a number of messages at each block size, starting from the lower bound on message size. .B NetPIPE increments the message size until the upper bound on message size is reached or the time to transmit a block exceeds one second, whichever occurs first. .PP .B NetPIPE\c \'s output file may be graphed with a program such as .B gnuplot(1) to view the results of the test. .B NetPIPE\c \'s output file contains five columns: time to transfer the block, bits per second, bits in block, bytes in block, and variance. These columns may be graphed to represent and compare the network's performance. For example, the .B network signature graph can be created by graphing time versus bits per second.
Sample .B gnuplot(1) commands for such a graph would be .PP .Ex set logscale x .Ee .PP .Ex plot "NetPIPE.out" using 1:2 .Ee .PP The more traditional .B throughput versus block size graph can be created by graphing bytes versus bits per second. Sample .B gnuplot(1) commands for such a graph would be .PP .Ex set logscale x .Ee .PP .Ex plot "NetPIPE.out" using 4:2 .Ee .ne 5 .SH OPTIONS .TP .B \-A \ \fIalignment\fR Align buffers to the given boundary. For example, a value of 4 would align buffers to 4-byte (word) boundaries. .ne 3 .TP .B \-a Specify asynchronous receive (a.k.a. preposted receive), if the underlying protocol supports it. .ne 3 .TP .BI \-b \ \fIbuffer_size\fR [TCP only] Set send and receive TCP buffer sizes. .ne 3 .TP .BI \-h \ \fIhostname\fR [TCP transmitter only] Specify name of host to which to connect. .ne 3 .TP .BI \-i \ \fIincrement\fR Specify increment step size (default is an exponentially increasing increment). .ne 3 .TP .BI \-l \ \fIstart_msg_size\fR Specify the starting message size. The test will start with messages of this size and increment, either exponentially or with an increment specified by the .B \-i flag, until a block requires more than one second to transmit or the ending message size specified by the .B \-u flag is reached, whichever occurs first. .ne 3 .TP .BI \-O \ \fIbuffer_offset\fR Specify offset of buffers from alignment. For example, specifying an alignment of 4 (with \-A) and an offset of 1 would align buffers to the first byte after a word boundary. .ne 3 .TP .BI \-o \ \fIoutput_filename\fR Specify output filename. By default, the output filename is .IR NetPIPE.out . .ne 3 .\".TP .\".B \-P .\"Print results on screen during execution of the test. By default, .\"NetPIPE is silent during execution of the test. .\".ne 3 .TP .BI \-p \ \fIport_number\fR [TCP only] Specify TCP port number to which to connect (for the transmitter) or the port on which to listen for connections (for the receiver). .ne 3 .TP .B \-r [TCP only] This process is a TCP receiver. .ne 3 .TP .B \-s Set streaming mode: data is only transmitted in one direction. By default, the transmitter measures the time taken as each data block is sent from the transmitter to the receiver and back, then divides the round-trip time by two to obtain the time taken by the message to travel in each direction. In streaming mode, the receiver measures the time required to receive the message and sends the measured time back to the transmitter for posting to the output file. .ne 3 .TP .B \-t [TCP only] This process is a TCP transmitter. .ne 3 .TP .BI \-u \ \fIending_msg_size\fR Specify the ending message size. By default, the test will end when the time to transmit a block exceeds one second. If .B \-u is specified, the test will end when either the test time exceeds one second or the ending message size is reached, whichever occurs first. .ne 3 .SH FILES .TP .I NetPIPE.out Default output file for .BR NetPIPE . Overridden by the .B \-o option. .SH AUTHOR .PP Quinn Snell, Guy Helmer, and others. .PP Clark Dorman contributed the PVM interface. .PP Information about .B NetPIPE can be found on the World Wide Web at http://www.scl.ameslab.gov/netpipe/. .SH BUGS By nature, .B NetPIPE will use as much of the network bandwidth as possible. Other users of the network may notice the effect. debian/changelog0000664000000000000000000001353612253224765011062 0ustar netpipe (3.7.2-7build1) trusty; urgency=medium * No-change rebuild for libopenmpi1.3 -> libopenmpi1.6 transition.
-- Logan Rosen Sat, 14 Dec 2013 23:02:53 -0500 netpipe (3.7.2-7) unstable; urgency=low * priority set to extra -- Camm Maguire Thu, 10 May 2012 13:15:48 +0000 netpipe (3.7.2-6) unstable; urgency=low * mpich-bin -> mpich2 in netpipe-mpich2 depends. -- Camm Maguire Fri, 20 Apr 2012 13:30:26 +0000 netpipe (3.7.2-5) unstable; urgency=low * netpipe-pvm extra priority -- Camm Maguire Mon, 16 Apr 2012 17:46:01 +0000 netpipe (3.7.2-4) unstable; urgency=low * optional priority -- Camm Maguire Mon, 16 Apr 2012 15:51:58 +0000 netpipe (3.7.2-3) unstable; urgency=low * remove mpich dependency unless maintainer wants to preserve it. -- Camm Maguire Mon, 16 Apr 2012 13:55:23 +0000 netpipe (3.7.2-2) unstable; urgency=low * Priority: extra for override file -- Camm Maguire Thu, 12 Apr 2012 13:12:44 +0000 netpipe (3.7.2-1) unstable; urgency=low * New upstream release * lintian fixes -- Camm Maguire Wed, 11 Apr 2012 20:12:24 +0000 netpipe (3.7.1-2) unstable; urgency=low * reinstate legacy support for older mpi implementations still in use. -- Camm Maguire Mon, 02 Apr 2012 20:42:14 +0000 netpipe (3.7.1-1.1) unstable; urgency=low * Non-maintainer upload. * Replace deprecated MPI implementations with OpenMPI and MPICH2. (Closes: #571449) -- Nicholas Breen Wed, 14 Mar 2012 18:46:53 -0700 netpipe (3.7.1-1) unstable; urgency=low * New upstream release * Bug fix: "debian/watch fails to report upstream's version", thanks to Raphael Geissert (Closes: #450010). -- Camm Maguire Wed, 06 Jan 2010 19:07:32 +0000 netpipe (3.6.2-2) unstable; urgency=low * Fix dependencies for mpich and lam transitions, Closes: #323744. * newer standards -- Camm Maguire Fri, 7 Oct 2005 14:18:20 +0000 netpipe (3.6.2-1) unstable; urgency=high * New upstream release -- Camm Maguire Tue, 26 Oct 2004 20:28:24 +0000 netpipe (3.6-4) unstable; urgency=low * Bug fix: "netpipe-tcp: invalid option -P", thanks to Kevin Turner (Closes: #251160). New manpage from upstream elides -P option. * Bug fix: "netpipe-tcp: option -t requires an argument", thanks to Kevin Turner (Closes: #251162). New manpage from upstream elides obsolete -t option. * Bug fix: "option -r misdocumented in manpage", thanks to Kevin Turner (Closes: #251167). New manpage from upstream correctly documents -r option. * Bug fix: "netpipe-tcp: option -i misdocumented in manpage", thanks to Kevin Turner (Closes: #251169). New manpage from upstream correctly documents -i option. * Bug fix: "netpipe-tcp: output contains three columns, not five", thanks to Kevin Turner (Closes: #251619). New manpage from upstream correctly documents np.out file format. -- Camm Maguire Wed, 2 Jun 2004 13:43:28 +0000 netpipe (3.6-3) unstable; urgency=low * Earlier bug close misapplied, fix here. -- Camm Maguire Mon, 5 Apr 2004 20:30:19 +0000 netpipe (3.6-2) unstable; urgency=low * Bug fix: "netpipe-tcp: Option -P documented but not known", thanks to Stephane Bortzmeyer (Closes: #231882).
Edited manpages and netpipe.c -- Camm Maguire Mon, 5 Apr 2004 20:26:06 +0000 netpipe (3.6-1) unstable; urgency=low * New upstream release -- Camm Maguire Fri, 23 Jan 2004 03:33:34 +0000 netpipe (3.5-3) unstable; urgency=low * Build-depend on lam4-dev * Newer standards * debhelper compat level 4 -- Camm Maguire Wed, 21 Jan 2004 21:02:27 +0000 netpipe (3.5-2) unstable; urgency=low * Applied 64bit safe patches, Closes: #214501 -- Camm Maguire Tue, 14 Oct 2003 17:19:19 +0000 netpipe (3.5-1) unstable; urgency=low * New upstream release, Closes: #205285 * Added netpipe_paper.ps to docs * Carried forward manpage from 2.4 * Applied bounds checking patch to -o argument, Closes: #203488 * Added NPlam2 executable -- Camm Maguire Wed, 20 Aug 2003 19:12:28 +0000 netpipe (2.4-5) unstable; urgency=low * Added watchfile * Added -lutil to EXTRA_LIBS * remove dh_suidregister * Newer standards * Cleaned copyright file * Recently required link against -lpmich added * Shared lib linking for NPmpich -- Camm Maguire Wed, 20 Aug 2003 17:49:06 +0000 netpipe (2.4-4) unstable; urgency=low * Varied long descriptions, Closes: #135533 * Newer standards -- Camm Maguire Sun, 24 Feb 2002 19:31:29 -0500 netpipe (2.4-3) unstable; urgency=low * Fixed spelling of NetPIPE in control, Closes: #132928 -- Camm Maguire Mon, 11 Feb 2002 10:47:14 -0500 netpipe (2.4-2) unstable; urgency=low * Rebuild against new lam3-dev, Closes: #106949 -- Camm Maguire Mon, 30 Jul 2001 13:34:26 -0400 netpipe (2.4-1) unstable; urgency=low * New upstream release * Added debhelper to Build-Depends -- Camm Maguire Fri, 23 Mar 2001 18:17:11 -0500 netpipe (2.3-3) unstable; urgency=low * Build depends on lam2-dev, pvm-dev and mpich * Search for mpich includes and libs, closes: #72842 -- Camm Maguire Thu, 25 Jan 2001 18:16:56 -0500 netpipe (2.3-2) unstable; urgency=low * Upgraded to latest standards version -- Camm Maguire Sun, 2 Apr 2000 16:11:56 +0200 netpipe (2.3-1) unstable; urgency=low * Altered Makefile to allow for lam and mpich builds using runtime variables * Initial Release. -- Camm Maguire Sat, 1 Jan 2000 23:01:51 -0500 debian/netpipe-lam.files0000664000000000000000000000003511741362170012427 0ustar usr/bin/NPlam usr/bin/NPlam2 debian/netpipe-mpich.docs0000664000000000000000000000004011741362170012600 0ustar dox/README dox/netpipe_paper.ps debian/compat0000664000000000000000000000000211741363724010376 0ustar 9 debian/netpipe-mpich2.docs0000664000000000000000000000004011741362170012662 0ustar dox/README dox/netpipe_paper.ps debian/netpipe-openmpi.files0000664000000000000000000000004511741362170013326 0ustar usr/bin/NPopenmpi usr/bin/NPopenmpi2 debian/netpipe-tcp.manpages0000664000000000000000000000001011741362170013136 0ustar NPtcp.1 debian/copyright0000664000000000000000000000225211741363560011132 0ustar This package was debianized by Camm Maguire on Sat, 1 Jan 2000 23:01:51 -0500. It was downloaded from ftp://ftp.scl.ameslab.gov/pub/netpipe/ Upstream Authors: Guy Helmer et al., ghelmer@scl.ameslab.gov Dave Turner, turner@ameslab.gov Copyright: /*****************************************************************************/ /* "NetPIPE" -- Network Protocol Independent Performance Evaluator. */ /* Copyright 1997, 1998 Iowa State University Research Foundation, Inc. */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation.
You should have received a copy of the */ /* GNU General Public License along with this program; if not, write to the */ /* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, */ /* MA 02110-1301, USA. */ netpipe is covered under the terms of the GPL. See the file /usr/share/common-licenses/GPL for more information. debian/netpipe-tcp.docs0000664000000000000000000000004011741362170012266 0ustar dox/README dox/netpipe_paper.ps debian/source/0000775000000000000000000000000011741362620010472 5ustar debian/source/format0000664000000000000000000000001411741362620011700 0ustar 3.0 (quilt) debian/netpipe.10000664000000000000000000002462511741362170010731 0ustar .\" -*- nroff -*- .\" .\" NetPIPE -- Network Protocol Independent Performance Evaluator. .\" Copyright 1997, 1998 Iowa State University Research Foundation, Inc. .\" .\" This program is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation. You should have received a copy of the .\" GNU General Public License along with this program; if not, write to the .\" Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. .\" .\" netpipe.1 .\" Created: Mon Jun 15 1998 by Guy Helmer .\" Rewritten: Jun 1 2004 by Dave Turner .\" .\" $Id: netpipe.1,v 1.3 1998/09/24 16:23:59 ghelmer Exp $ .\" .TH netpipe 1 "June 1, 2004" "NetPIPE" "netpipe" .SH NAME NetPIPE \- .IB Net work .IB P rotocol .IB I ndependent .IB P erformance .IB E valuator .SH SYNOPSIS .B NPtcp [\c .BI \-h \ receiver_hostname\fR\c ] [\c .BI \-b \ TCP_buffer_sizes\fR\c ] [options] .PP mpirun [\c .BI \-machinefile \ hostlist\fR\c ] -np 2 .B NPmpi [-a] [-S] [-z] [options] .PP mpirun [\c .BI \-machinefile \ hostlist\fR\c ] -np 2 .B NPmpi2 [-f] [-g] [options] .PP .B NPpvm [options] See the TESTING sections below for a more complete description of how to run NetPIPE in each environment. The OPTIONS section describes the general options available for all modules. See the README file from the tar-ball at http://www.scl.ameslab.gov/Projects/NetPIPE/ for documentation on the InfiniBand, GM, SHMEM, LAPI, and memcpy modules. .SH DESCRIPTION .PP .B NetPIPE uses a simple series of ping-pong tests over a range of message sizes to provide a complete measure of the performance of a network. It bounces messages of increasing size between two processes, whether across a network or within an SMP system. Message sizes are chosen at regular intervals, and with slight perturbations, to provide a complete evaluation of the communication system. Each data point involves many ping-pong tests to provide an accurate timing. Latencies are calculated by dividing the round trip time in half for small messages (less than 64 bytes). .PP The communication time for small messages is dominated by the overhead in the communication layers, meaning that the transmission is latency bound. For larger messages, the communication rate becomes bandwidth limited by some component in the communication subsystem (PCI bus, network card link, network switch). .PP These measurements can be done at the message-passing layer (MPI, MPI-2, and PVM) or at the native communications layers that they run upon (TCP/IP, GM for Myrinet cards, InfiniBand, SHMEM for the Cray T3E systems, and LAPI for IBM SP systems).
Recent work aims at measuring some internal system properties, such as the memcpy module that measures internal memory copy rates, or a disk module under development that measures the performance of various I/O devices. .PP Some uses for NetPIPE include: .RS .PP Comparing the latency and maximum throughput of various network cards. .PP Comparing the performance between different types of networks. .PP Looking for inefficiencies in the message-passing layer by comparing it to the native communication layer. .PP Optimizing the message-passing layer and tuning OS and driver parameters for optimal performance of the communication subsystem. .RE .PP .B NetPIPE is provided with many modules allowing it to interface with a wide variety of communication layers. It is fairly easy to write new interfaces for other reliable protocols by using the existing modules as examples. .SH TESTING TCP .PP NPtcp can now be launched in two ways, by manually starting NPtcp on both systems or by using the nplaunch script. To manually start NPtcp, the NetPIPE receiver must be started first on the remote system using the command: .PP NPtcp [options] .PP then the primary transmitter is started on the local system with the command .PP NPtcp \-h .I receiver_hostname [options] .PP Any options used must be the same on both sides. The nplaunch script uses ssh to launch the remote receiver before starting the local transmitter. To use rsh, simply change the nplaunch script. .PP nplaunch NPtcp -h .I receiver_hostname [options] .PP The .BI \-b \ TCP_buffer_sizes\fR\c option sets the TCP socket buffer size, which can greatly influence the maximum throughput on some systems. A throughput graph that flattens out suddenly may be a sign of the performance being limited by the socket buffer sizes. .SH TESTING MPI and MPI-2 .PP Use of the MPI interface for NetPIPE depends on the MPI implementation being used. All will require the number of processes to be specified, usually with a .I -np 2 argument. Cluster environments may require a list of the hosts being used when each job is run. Put the list of hosts in hostlist; then, for OpenMPI, run NetPIPE using: .PP mpirun --hostfile .I hostlist \-np 2 NPmpi [NetPIPE options] .PP For MPICH2 use instead: .PP mpirun \-machinefile .I hostlist \-np 2 NPmpi [NetPIPE options] .PP To test the 1-sided communications of the MPI-2 standard, compile using: .PP .B make mpi2 .PP Run as described above, and MPI will use 1-sided MPI_Put() calls in both directions, with each receiver blocking until the last byte has been overwritten before bouncing the message back. Use the .I -f option to force usage of a fence to block rather than an overwrite of the last byte. The .I -g option will use MPI_Get() functions to transfer the data rather than MPI_Put(). .SH TESTING PVM .PP Start the PVM system using: .PP pvm .PP and add a second machine with the PVM command .PP add .I receiver_hostname .PP Exit the PVM command line interface using quit, then run the PVM NetPIPE receiver on one system with the command: .PP NPpvm [options] .PP and run the PVM NetPIPE transmitter on the other system with the command: .PP NPpvm -h .I receiver_hostname [options] .PP Any options used must be the same on both sides. The nplaunch script may also be used with NPpvm as described above for NPtcp. .SH TESTING METHODOLOGY .PP .B NetPIPE tests network performance by sending a number of messages at each block size, starting from the lower bound on the message sizes.
The message size is incremented until the upper bound on the message size is reached or the time to transmit a block exceeds one second, whichever occurs first. Message sizes are chosen at regular intervals, with slight perturbations from them, to provide a more complete evaluation of the communication subsystem. .PP The .B NetPIPE\c output file may be graphed using a program such as .B gnuplot(1)\. The output file contains three columns: the number of bytes in the block, the transfer rate in bits per second, and the time to transfer the block (half the round-trip time). The first two columns are normally used to graph the throughput vs block size, while the third column provides the latency. For example, the .B throughput versus block size graph can be created by graphing bytes versus bits per second. Sample .B gnuplot(1) commands for such a graph would be .PP set logscale x .PP plot "np.out" .ne 5 .SH OPTIONS .TP .B \-a asynchronous mode: prepost receives (MPI, IB modules) .ne 3 .TP .BI \-b \ \fITCP_buffer_sizes\fR Set the send and receive TCP buffer sizes (TCP module only). .ne 3 .TP .B \-B Burst mode where all receives are preposted at once (MPI, IB modules). .ne 3 .TP .B \-f Use a fence to block for completion (MPI2 module only). .ne 3 .TP .B \-g Use MPI_Get() instead of MPI_Put() (MPI2 module only). .ne 3 .TP .BI \-h \ \fIhostname\fR Specify the name of the receiver host to connect to (TCP, PVM, IB, GM). .ne 3 .TP .B \-I Invalidate cache to measure performance without cache effects (mostly affects IB and memcpy modules). .ne 3 .TP .B \-i Do an integrity check instead of a performance evaluation. .ne 3 .TP .BI \-l \ \fIstarting_msg_size\fR Specify the lower bound for the size of messages to be tested. .ne 3 .TP .BI \-n \ \fInrepeats\fR Set the number of repeats for each test to a constant. Otherwise, the number of repeats is chosen to provide an accurate timing for each test. Be very careful when specifying a low number: the time for each ping-pong test must still exceed the timer accuracy. .ne 3 .TP .BI \-O \ \fIsource_offset,dest_offset\fR Specify the source and destination offsets of the buffers from perfect page alignment. .ne 3 .TP .BI \-o \ \fIoutput_filename\fR Specify the output filename (default is np.out). .ne 3 .TP .BI \-p \ \fIperturbation_size\fR NetPIPE chooses the message sizes at regular intervals, increasing them exponentially from the lower boundary to the upper boundary. At each point, it also tests perturbations of 3 bytes above and 3 bytes below each test point to find idiosyncrasies in the system. This perturbation value can be changed using the .I -p option, or turned off using .I -p 0 . .ne 3 .TP .B \-r This option resets the TCP sockets after every test (TCP module only). It is necessary for some streaming tests to get good measurements since the socket window size may otherwise collapse. .ne 3 .TP .B \-s Set streaming mode where data is only transmitted in one direction. .ne 3 .TP .B \-S Use synchronous sends (MPI module only). .ne 3 .TP .BI \-u \ \fIupper_bound\fR Specify the upper bound on the size of messages being tested. By default, NetPIPE will stop when the time to transmit a block exceeds one second. .TP .B \-z Receive messages using MPI_ANY_SOURCE (MPI module only) .ne 3 .TP .B \-2 Set bi-directional mode where both sides send and receive at the same time (supported by most modules). You may need to use .I -a to choose asynchronous communications for MPI to avoid freeze-ups.
For TCP, the maximum test size will be limited by the TCP buffer sizes. .ne 3 .SH FILES .TP .I np.out Default output file for .BR NetPIPE . Overridden by the .B \-o option. .SH AUTHOR .PP The original NetPIPE core plus TCP and MPI modules were written by Quinn Snell, Armin Mikler, Guy Helmer, and John Gustafson. NetPIPE is currently being developed and maintained by Dave Turner with contributions from many students (Bogdan Vasiliu, Adam Oline, Xuehua Chen, and Brian Smith). .PP Send comments/bug-reports to: .I . .PP Additional information about .B NetPIPE can be found on the World Wide Web at .I http://www.scl.ameslab.gov/Projects/NetPIPE/ .SH BUGS As of version 3.6.1, there is a bug that causes NetPIPE to segfault on Red Hat Enterprise systems. I will debug this as soon as I get access to a few such systems. -Dave Turner (turner@ameslab.gov) debian/netpipe-pvm.docs0000664000000000000000000000004011741362170012302 0ustar dox/README dox/netpipe_paper.ps debian/netpipe-mpich2.manpages0000664000000000000000000000001311741363706013533 0ustar NPmpich2.1 debian/netpipe-openmpi.docs0000664000000000000000000000004011741362170013147 0ustar dox/README dox/netpipe_paper.ps debian/netpipe-openmpi.manpages0000664000000000000000000000003111741362170014012 0ustar NPopenmpi.1 NPopenmpi2.1 debian/netpipe-tcp.files0000664000000000000000000000001611741362170012443 0ustar usr/bin/NPtcp debian/netpipe-pvm.manpages0000664000000000000000000000001011741362170013142 0ustar NPpvm.1 debian/netpipe-mpich.files0000664000000000000000000000002011741362170012750 0ustar usr/bin/NPmpich debian/netpipe-mpich.manpages0000664000000000000000000000001211741362170013442 0ustar NPmpich.1 debian/netpipe-lam.docs0000664000000000000000000000004011741362170012251 0ustar dox/README dox/netpipe_paper.ps debian/netpipe-mpich2.files0000664000000000000000000000002111741362170013033 0ustar usr/bin/NPmpich2 debian/mpich.control0000664000000000000000000000242711743022222011672 0ustar Package: netpipe-mpich Architecture: any Depends: mpich-bin, ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using MPICH MPI NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination? Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package measures network performance using the MPI protocol, a Message Passing Interface frequently used in parallel processing, and which uses in turn TCP as its underlying transport. The implementation of the MPI standard used by this package is that provided by the mpich package.
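For reference, the NPmpich binary described by this control file is launched through MPICH's mpirun in the same way as the other MPI variants. A minimal sketch, assuming a two-line host file named "machines" and the mpich mpirun on the PATH (file names here are placeholders; exact flags vary by MPICH version):

    mpirun -machinefile machines -np 2 NPmpich -o np.out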
debian/netpipe-pvm.files0000664000000000000000000000001611741362170012457 0ustar usr/bin/NPpvm debian/control0000664000000000000000000001510411752737566010607 0ustar Source: netpipe Section: net Priority: extra Maintainer: Camm Maguire Build-Depends: lam4-dev ( >= 7.1.1-5 ), libopenmpi-dev [alpha amd64 armel armhf i386 ia64 powerpc powerpcspe sparc sparc64 kfreebsd-i386 kfreebsd-amd64 hurd-i386], libmpich2-dev, pvm-dev, debhelper ( >= 9 ) Standards-Version: 3.9.3 Package: netpipe-tcp Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using the TCP protocol NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination? Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package uses a raw TCP protocol to measure network performance. Package: netpipe-lam Architecture: any Depends: lam-runtime, ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using LAM MPI NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination? Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package measures network performance using the MPI protocol, a Message Passing Interface frequently used in parallel processing, and which uses in turn TCP as its underlying transport. The implementation of the MPI standard used by this package is that provided by the lam set of packages. Package: netpipe-openmpi Architecture: alpha amd64 armel armhf i386 ia64 powerpc powerpcspe sparc sparc64 kfreebsd-i386 kfreebsd-amd64 hurd-i386 Depends: openmpi-bin, ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using OpenMPI NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination?
Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package measures network performance using the MPI protocol, a Message Passing Interface frequently used in parallel processing, and which uses in turn TCP as its underlying transport. The implementation of the MPI standard used by this package is that provided by the openmpi set of packages. Package: netpipe-mpich2 Architecture: any Depends: mpich2, ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using MPICH2 MPI NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination? Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package measures network performance using the MPI protocol, a Message Passing Interface frequently used in parallel processing, and which uses in turn TCP as its underlying transport. The implementation of the MPI standard used by this package is that provided by the mpich2 package. Package: netpipe-pvm Architecture: any Priority: extra Depends: pvm, ${shlibs:Depends}, ${misc:Depends} Description: Network performance tool using PVM NetPIPE is a protocol independent performance tool that encapsulates the best of ttcp and netperf and visually represents the network performance under a variety of conditions. By taking the end-to-end application view of a network, NetPIPE clearly shows the overhead associated with different protocol layers. NetPIPE answers such questions as: how soon will a given data block of size k arrive at its destination? Which network and protocol will transmit size k blocks the fastest? What is a given network's effective maximum throughput and saturation level? Does there exist a block size k for which the throughput is maximized? How much communication overhead is due to the network communication protocol layer(s)? How quickly will a small (< 1 kbyte) control message arrive, and which network and protocol are best for this purpose? . This package measures network performance using the PVM protocol, a Parallel Virtual Machine interface frequently used in parallel processing, and which uses in turn TCP as its underlying transport. PVM support is provided in its own separate pvm package on Debian systems. 
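As a quick end-to-end check of the binaries these packages ship, the TCP pair can be exercised between two hosts. A minimal sketch, assuming netpipe-tcp is installed on both machines; "receiver_host" and the output filename are placeholders:

    NPtcp                              (receiving host, started first)
    NPtcp -h receiver_host -o np.out   (transmitting host)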
debian/docs0000664000000000000000000000004011741362170010037 0ustar dox/README dox/netpipe_paper.ps debian/dirs0000664000000000000000000000001011741362170010045 0ustar usr/bin debian/patches/0000775000000000000000000000000011741362703010623 5ustar debian/patches/series0000664000000000000000000000002611741362703012036 0ustar 3.0-quilt-source-init debian/patches/3.0-quilt-source-init0000664000000000000000000000407611741362703014450 0ustar Description: TODO: Put a short summary on the line above and replace this paragraph with a longer explanation of this change. Complete the meta-information with other relevant fields (see below for details). To make it easier, the information below has been extracted from the changelog. Adjust it or drop it. . netpipe (3.7.2-1) unstable; urgency=low . * New upstream release * lintian fixes Author: Camm Maguire --- The information above should follow the Patch Tagging Guidelines, please checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here are templates for supplementary fields that you might want to add: Origin: , Bug: Bug-Debian: http://bugs.debian.org/ Bug-Ubuntu: https://launchpad.net/bugs/ Forwarded: Reviewed-By: Last-Update: --- netpipe-3.7.2.orig/src/netpipe.h +++ netpipe-3.7.2/src/netpipe.h @@ -18,6 +18,7 @@ #include #include #include +#include #include /* struct timeval */ #include /* getrusage() */ #include /* malloc(3) */ @@ -420,6 +421,10 @@ void SaveRecvPtr(ArgStruct* p); void ResetRecvPtr(ArgStruct* p); +void AfterAlignmentInit(ArgStruct* p); + +/* void InitBufferData(ArgStruct *p, int nbytes); */ + void PrintUsage(); int getopt( int argc, char * const argv[], const char *optstring); --- netpipe-3.7.2.orig/src/netpipe.c +++ netpipe-3.7.2/src/netpipe.c @@ -145,7 +145,7 @@ int main(int argc, char **argv) printf("Performance measured without cache effects\n\n"); fflush(stdout); break; - case 'o': strcpy(s,optarg); + case 'o': memset(s,0,sizeof(s));strncpy(s,optarg,sizeof(s)-1); printf("Sending output to %s\n", s); fflush(stdout); break; debian/netpipe-lam.manpages0000664000000000000000000000002111741362170013113 0ustar NPlam.1 NPlam2.1