DrIFT-2.2.3/0000777000076400007640000000000010753607202007445 500000000000000DrIFT-2.2.3/code/0000777000076400007640000000000010753606147010366 500000000000000DrIFT-2.2.3/code/FunctorM.hs0000644000076400007640000000070010753606147012370 00000000000000module FunctorM where import Array class FunctorM f where fmapM :: Monad m => (a -> m b) -> f a -> m (f b) instance FunctorM [] where fmapM f xs = mapM f xs instance FunctorM Maybe where fmapM _ Nothing = return Nothing fmapM f (Just x) = f x >>= return . Just instance Ix i => FunctorM (Array i) where fmapM f a = sequence [ f e >>= return . (,) i | (i,e) <- assocs a] >>= return . array b where b = bounds a DrIFT-2.2.3/code/README.txt0000644000076400007640000000032310753606147011776 00000000000000This directory contains various modules which are assosiated with deriving rules. They may either be used as-is or some may require modification or have a suitable replacement in the standard libraries already. DrIFT-2.2.3/code/GhcBinary.hs0000644000076400007640000005063210753606147012512 00000000000000{-# OPTIONS -fallow-overlapping-instances #-} -- -- (c) The University of Glasgow 2002 -- -- Binary I/O library, with special tweaks for GHC -- -- Based on the nhc98 Binary library, which is copyright -- (c) Malcolm Wallace and Colin Runciman, University of York, 1998. -- Under the terms of the license for that software, we must tell you -- where you can obtain the original version of the Binary library, namely -- http://www.cs.york.ac.uk/fp/nhc98/ -- arch-tag: 1418e09a-9a18-4dca-a0fc-9262c9d97beb module Binary ( {-type-} Bin, {-class-} Binary(..), {-type-} BinHandle, openBinIO, openBinIO_, openBinMem, -- closeBin, seekBin, tellBin, castBin, writeBinMem, readBinMem, isEOFBin, -- for writing instances: putByte, getByte, -- lazy Bin I/O lazyGet, lazyPut, -- GHC only: ByteArray(..), getByteArray, putByteArray --getBinFileWithDict, -- :: Binary a => FilePath -> IO a --putBinFileWithDict, -- :: Binary a => FilePath -> ModuleName -> a -> IO () ) where --import FastString import FastMutInt import Data.Array.IO import Data.Array import Data.Bits import Data.Int import Data.Word import Data.IORef import Data.Char ( ord, chr ) import Data.Array.Base ( unsafeRead, unsafeWrite ) import Control.Monad ( when ) import Control.Exception ( throwDyn ) import System.IO as IO import System.IO.Unsafe ( unsafeInterleaveIO ) import System.IO.Error ( mkIOError, eofErrorType ) import GHC.Real ( Ratio(..) ) import GHC.Exts import GHC.IOBase ( IO(..) ) import GHC.Word ( Word8(..) 
) import System.IO ( openBinaryFile ) import PackedString --import Atom import Time import Monad import Data.Array.IArray import Data.Array.Base {- #if __GLASGOW_HASKELL__ < 503 type BinArray = MutableByteArray RealWorld Int newArray_ bounds = stToIO (newCharArray bounds) unsafeWrite arr ix e = stToIO (writeWord8Array arr ix e) unsafeRead arr ix = stToIO (readWord8Array arr ix) #if __GLASGOW_HASKELL__ < 411 newByteArray# = newCharArray# #endif hPutArray h arr sz = hPutBufBAFull h arr sz hGetArray h sz = hGetBufBAFull h sz mkIOError :: IOErrorType -> String -> Maybe Handle -> Maybe FilePath -> Exception mkIOError t location maybe_hdl maybe_filename = IOException (IOError maybe_hdl t location "" #if __GLASGOW_HASKELL__ > 411 maybe_filename #endif ) eofErrorType = EOF #ifndef SIZEOF_HSWORD #define SIZEOF_HSWORD WORD_SIZE_IN_BYTES #endif #else type BinArray = IOUArray Int Word8 #endif -} -- #define SIZEOF_HSINT 4 type BinArray = IOUArray Int Word8 --------------------------------------------------------------- -- BinHandle --------------------------------------------------------------- data BinHandle = BinMem { -- binary data stored in an unboxed array off_r :: !FastMutInt, -- the current offset sz_r :: !FastMutInt, -- size of the array (cached) arr_r :: !(IORef BinArray) -- the array (bounds: (0,size-1)) } -- XXX: should really store a "high water mark" for dumping out -- the binary data to a file. | BinIO { -- binary data stored in a file off_r :: !FastMutInt, -- the current offset (cached) hdl :: !IO.Handle -- the file handle (must be seekable) } -- cache the file ptr in BinIO; using hTell is too expensive -- to call repeatedly. If anyone else is modifying this Handle -- at the same time, we'll be screwed. --getUserData :: BinHandle -> UserData --getUserData bh = bh_usr bh --setUserData :: BinHandle -> UserData -> BinHandle --setUserData bh us = bh { bh_usr = us } --------------------------------------------------------------- -- Bin --------------------------------------------------------------- newtype Bin a = BinPtr Int deriving (Eq, Ord, Show, Bounded) castBin :: Bin a -> Bin b castBin (BinPtr i) = BinPtr i --------------------------------------------------------------- -- class Binary --------------------------------------------------------------- class Binary a where put_ :: BinHandle -> a -> IO () put :: BinHandle -> a -> IO (Bin a) get :: BinHandle -> IO a -- define one of put_, put. Use of put_ is recommended because it -- is more likely that tail-calls can kick in, and we rarely need the -- position return value. 
put_ bh a = do put bh a; return () put bh a = do p <- tellBin bh; put_ bh a; return p putAt :: Binary a => BinHandle -> Bin a -> a -> IO () putAt bh p x = do seekBin bh p; put bh x; return () getAt :: Binary a => BinHandle -> Bin a -> IO a getAt bh p = do seekBin bh p; get bh openBinIO_ :: IO.Handle -> IO BinHandle openBinIO_ h = openBinIO h openBinIO :: IO.Handle -> IO BinHandle openBinIO h = do r <- newFastMutInt writeFastMutInt r 0 return (BinIO r h) openBinMem :: Int -> IO BinHandle openBinMem size | size <= 0 = error "Data.Binary.openBinMem: size must be >= 0" | otherwise = do arr <- newArray_ (0,size-1) arr_r <- newIORef arr ix_r <- newFastMutInt writeFastMutInt ix_r 0 sz_r <- newFastMutInt writeFastMutInt sz_r size return (BinMem ix_r sz_r arr_r) tellBin :: BinHandle -> IO (Bin a) tellBin (BinIO r _) = do ix <- readFastMutInt r; return (BinPtr ix) tellBin (BinMem r _ _) = do ix <- readFastMutInt r; return (BinPtr ix) seekBin :: BinHandle -> Bin a -> IO () seekBin (BinIO ix_r h) (BinPtr p) = do writeFastMutInt ix_r p hSeek h AbsoluteSeek (fromIntegral p) seekBin h@(BinMem ix_r sz_r a) (BinPtr p) = do sz <- readFastMutInt sz_r if (p >= sz) then do expandBin h p; writeFastMutInt ix_r p else writeFastMutInt ix_r p isEOFBin :: BinHandle -> IO Bool isEOFBin (BinMem ix_r sz_r a) = do ix <- readFastMutInt ix_r sz <- readFastMutInt sz_r return (ix >= sz) isEOFBin (BinIO ix_r h) = hIsEOF h writeBinMem :: BinHandle -> FilePath -> IO () writeBinMem (BinIO _ _) _ = error "Data.Binary.writeBinMem: not a memory handle" writeBinMem (BinMem ix_r sz_r arr_r) fn = do h <- openBinaryFile fn WriteMode arr <- readIORef arr_r ix <- readFastMutInt ix_r hPutArray h arr ix hClose h readBinMem :: FilePath -> IO BinHandle -- Return a BinHandle with a totally undefined State readBinMem filename = do h <- openBinaryFile filename ReadMode filesize' <- hFileSize h let filesize = fromIntegral filesize' arr <- newArray_ (0,filesize-1) count <- hGetArray h arr filesize when (count /= filesize) (error ("Binary.readBinMem: only read " ++ show count ++ " bytes")) hClose h arr_r <- newIORef arr ix_r <- newFastMutInt writeFastMutInt ix_r 0 sz_r <- newFastMutInt writeFastMutInt sz_r filesize return (BinMem ix_r sz_r arr_r) -- expand the size of the array to include a specified offset expandBin :: BinHandle -> Int -> IO () expandBin (BinMem ix_r sz_r arr_r) off = do sz <- readFastMutInt sz_r let sz' = head (dropWhile (<= off) (iterate (* 2) sz)) arr <- readIORef arr_r arr' <- newArray_ (0,sz'-1) sequence_ [ unsafeRead arr i >>= unsafeWrite arr' i | i <- [ 0 .. sz-1 ] ] writeFastMutInt sz_r sz' writeIORef arr_r arr' return () expandBin (BinIO _ _) _ = return () -- no need to expand a file, we'll assume they expand by themselves. 
-- ----------------------------------------------------------------------------- -- Low-level reading/writing of bytes putWord8 :: BinHandle -> Word8 -> IO () putWord8 h@(BinMem ix_r sz_r arr_r) w = do ix <- readFastMutInt ix_r sz <- readFastMutInt sz_r -- double the size of the array if it overflows if (ix >= sz) then do expandBin h ix putWord8 h w else do arr <- readIORef arr_r unsafeWrite arr ix w writeFastMutInt ix_r (ix+1) return () putWord8 (BinIO ix_r h) w = do ix <- readFastMutInt ix_r hPutChar h (chr (fromIntegral w)) -- XXX not really correct writeFastMutInt ix_r (ix+1) return () getWord8 :: BinHandle -> IO Word8 getWord8 (BinMem ix_r sz_r arr_r) = do ix <- readFastMutInt ix_r sz <- readFastMutInt sz_r when (ix >= sz) $ ioError (mkIOError eofErrorType "Data.Binary.getWord8" Nothing Nothing) arr <- readIORef arr_r w <- unsafeRead arr ix writeFastMutInt ix_r (ix+1) return w getWord8 (BinIO ix_r h) = do ix <- readFastMutInt ix_r c <- hGetChar h writeFastMutInt ix_r (ix+1) return $! (fromIntegral (ord c)) -- XXX not really correct {-# INLINE putByte #-} putByte :: BinHandle -> Word8 -> IO () putByte bh w = putWord8 bh w {-# INLINE getByte #-} getByte :: BinHandle -> IO Word8 getByte = getWord8 -- ----------------------------------------------------------------------------- -- Primitve Word writes instance Binary Word8 where put_ = putWord8 get = getWord8 instance Binary Word16 where put_ h w = do -- XXX too slow.. inline putWord8? putByte h (fromIntegral (w `shiftR` 8)) putByte h (fromIntegral (w .&. 0xff)) get h = do w1 <- getWord8 h w2 <- getWord8 h return $! ((fromIntegral w1 `shiftL` 8) .|. fromIntegral w2) instance Binary Word32 where put_ h w = do putByte h (fromIntegral (w `shiftR` 24)) putByte h (fromIntegral ((w `shiftR` 16) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 8) .&. 0xff)) putByte h (fromIntegral (w .&. 0xff)) get h = do w1 <- getWord8 h w2 <- getWord8 h w3 <- getWord8 h w4 <- getWord8 h return $! ((fromIntegral w1 `shiftL` 24) .|. (fromIntegral w2 `shiftL` 16) .|. (fromIntegral w3 `shiftL` 8) .|. (fromIntegral w4)) instance Binary Word64 where put_ h w = do putByte h (fromIntegral (w `shiftR` 56)) putByte h (fromIntegral ((w `shiftR` 48) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 40) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 32) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 24) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 16) .&. 0xff)) putByte h (fromIntegral ((w `shiftR` 8) .&. 0xff)) putByte h (fromIntegral (w .&. 0xff)) get h = do w1 <- getWord8 h w2 <- getWord8 h w3 <- getWord8 h w4 <- getWord8 h w5 <- getWord8 h w6 <- getWord8 h w7 <- getWord8 h w8 <- getWord8 h return $! ((fromIntegral w1 `shiftL` 56) .|. (fromIntegral w2 `shiftL` 48) .|. (fromIntegral w3 `shiftL` 40) .|. (fromIntegral w4 `shiftL` 32) .|. (fromIntegral w5 `shiftL` 24) .|. (fromIntegral w6 `shiftL` 16) .|. (fromIntegral w7 `shiftL` 8) .|. (fromIntegral w8)) -- ----------------------------------------------------------------------------- -- Primitve Int writes instance Binary Int8 where put_ h w = put_ h (fromIntegral w :: Word8) get h = do w <- get h; return $! (fromIntegral (w::Word8)) instance Binary Int16 where put_ h w = put_ h (fromIntegral w :: Word16) get h = do w <- get h; return $! (fromIntegral (w::Word16)) instance Binary Int32 where put_ h w = put_ h (fromIntegral w :: Word32) get h = do w <- get h; return $! (fromIntegral (w::Word32)) instance Binary Int64 where put_ h w = put_ h (fromIntegral w :: Word64) get h = do w <- get h; return $! 
(fromIntegral (w::Word64)) -- ----------------------------------------------------------------------------- -- Instances for standard types instance Binary () where put_ bh () = return () get _ = return () -- getF bh p = case getBitsF bh 0 p of (_,b) -> ((),b) instance Binary Bool where put_ bh b = putByte bh (fromIntegral (fromEnum b)) get bh = do x <- getWord8 bh; return $! (toEnum (fromIntegral x)) -- getF bh p = case getBitsF bh 1 p of (x,b) -> (toEnum x,b) instance Binary Char where put_ bh c = put_ bh (fromIntegral (ord c) :: Word32) get bh = do x <- get bh; return $! (chr (fromIntegral (x :: Word32))) -- getF bh p = case getBitsF bh 8 p of (x,b) -> (toEnum x,b) instance Binary Int where -- #if SIZEOF_HSINT == 4 put_ bh i = put_ bh (fromIntegral i :: Int32) get bh = do x <- get bh return $! (fromIntegral (x :: Int32)) -- #elif SIZEOF_HSINT == 8 -- put_ bh i = put_ bh (fromIntegral i :: Int64) -- get bh = do -- x <- get bh -- return $! (fromIntegral (x :: Int64)) -- #else -- #error "unsupported sizeof(HsInt)" -- #endif instance Binary ClockTime where put_ bh ct = do let t = toUTCTime ct put_ bh (ctYear t) put_ bh (fromEnum $ ctMonth t) put_ bh (ctDay t) put_ bh (ctHour t) put_ bh (ctMin t) put_ bh (ctSec t) get bh = do year <- get bh month <- fmap toEnum $ get bh day <- get bh hour <- get bh min <- get bh sec <- get bh return $ toClockTime $ (toUTCTime epoch) {ctYear = year, ctDay = day, ctMonth = month, ctHour = hour, ctMin = min, ctSec = sec} epoch = toClockTime $ CalendarTime { ctYear = 1970, ctMonth = January, ctDay = 0, ctHour = 0, ctMin = 0, ctSec = 0, ctTZ = 0, ctPicosec = 0, ctWDay = undefined, ctYDay = undefined, ctTZName = undefined, ctIsDST = undefined} instance Binary PackedString where put_ bh (PS a) = put_ bh a get bh = fmap PS $ get bh --put_ bh $ (snd $ Data.Array.IArray.bounds a) + 1 --mapM_ (put_ bh) (Data.Array.IArray.elems a) --sz <- get bh --x <- sequence $ replicate sz (get bh) --return $ PS (Data.Array.IArray.listArray (0,sz - 1) x) --put_ bh ps = put_ bh (unpackPS ps) --get bh = liftM packString $ get bh --put_ bh ps = putNList_ bh (unpackPS ps) --get bh = liftM packString $ getNList bh -- putNList_ bh xs = do -- put_ bh (length xs) -- mapM_ (put_ bh) xs -- -- getNList bh = do -- l <- get bh -- sequence $ replicate l (get bh) {- instance Binary [Char] where put_ bh cs = put_ bh (packString cs) get bh = do ps <- get bh return $ unpackPS ps -} instance Binary a => Binary [a] where put_ bh [] = putByte bh 0 put_ bh (x:xs) = do putByte bh 1; put_ bh x; put_ bh xs get bh = do h <- getWord8 bh case h of 0 -> return [] _ -> do x <- get bh xs <- get bh return (x:xs) instance (Binary a, Binary b) => Binary (a,b) where put_ bh (a,b) = do put_ bh a; put_ bh b get bh = do a <- get bh b <- get bh return (a,b) instance (Binary a, Binary b, Binary c) => Binary (a,b,c) where put_ bh (a,b,c) = do put_ bh a; put_ bh b; put_ bh c get bh = do a <- get bh b <- get bh c <- get bh return (a,b,c) instance (Binary a, Binary b, Binary c, Binary d) => Binary (a,b,c,d) where put_ bh (a,b,c,d) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d get bh = do a <- get bh b <- get bh c <- get bh d <- get bh return (a,b,c,d) instance Binary a => Binary (Maybe a) where put_ bh Nothing = putByte bh 0 put_ bh (Just a) = do putByte bh 1; put_ bh a get bh = do h <- getWord8 bh case h of 0 -> return Nothing _ -> do x <- get bh return (Just x) instance (Binary a, Binary b) => Binary (Either a b) where put_ bh (Left a) = do putByte bh 0; put_ bh a put_ bh (Right b) = do putByte bh 1; put_ bh b get bh = do 
h <- getWord8 bh case h of 0 -> do a <- get bh ; return (Left a) _ -> do b <- get bh ; return (Right b) -- these flatten the start element. hope that's okay! instance Binary (UArray Int Word8) where put_ bh@(BinIO ix_r h) ua = do let sz = rangeSize (Data.Array.IO.bounds ua) ix <- readFastMutInt ix_r put_ bh sz ua <- unsafeThaw ua hPutArray h ua sz writeFastMutInt ix_r (ix + sz + 4) put_ bh (UArray s e ba) = do let sz = (rangeSize (s,e)) put_ bh sz case sz of I# i -> putByteArray bh ba i get bh@(BinIO ix_r h) = do ix <- readFastMutInt ix_r sz <- get bh ba <- newArray_ (0, sz - 1) hGetArray h ba sz writeFastMutInt ix_r (ix + sz + 4) ba <- unsafeFreeze ba return ba get bh = do sz <- get bh BA ba <- getByteArray bh sz return $ UArray 0 (sz - 1) ba {- instance (Ix a, Binary a) => Binary (UArray a Word8) where put_ bh (UArray s e ba) = do put_ bh s put_ bh e case (rangeSize (s,e)) of I# i -> putByteArray bh ba i get bh = do s <- get bh e <- get bh BA ba <- getByteArray bh (rangeSize (s,e)) return $ UArray s e ba -} -- #ifdef __GLASGOW_HASKELL__ instance Binary Integer where put_ bh (S# i#) = do putByte bh 0; put_ bh (I# i#) put_ bh (J# s# a#) = do p <- putByte bh 1; put_ bh (I# s#) let sz# = sizeofByteArray# a# -- in *bytes* put_ bh (I# sz#) -- in *bytes* putByteArray bh a# sz# get bh = do b <- getByte bh case b of 0 -> do (I# i#) <- get bh return (S# i#) _ -> do (I# s#) <- get bh sz <- get bh (BA a#) <- getByteArray bh sz return (J# s# a#) putByteArray :: BinHandle -> ByteArray# -> Int# -> IO () putByteArray bh a s# = loop 0# where loop n# | n# ==# s# = return () | otherwise = do putByte bh (indexByteArray a n#) loop (n# +# 1#) getByteArray :: BinHandle -> Int -> IO ByteArray getByteArray bh (I# sz) = do (MBA arr) <- newByteArray sz let loop n | n ==# sz = return () | otherwise = do w <- getByte bh writeByteArray arr n w loop (n +# 1#) loop 0# freezeByteArray arr data ByteArray = BA ByteArray# data MBA = MBA (MutableByteArray# RealWorld) newByteArray :: Int# -> IO MBA newByteArray sz = IO $ \s -> case newByteArray# sz s of { (# s, arr #) -> (# s, MBA arr #) } freezeByteArray :: MutableByteArray# RealWorld -> IO ByteArray freezeByteArray arr = IO $ \s -> case unsafeFreezeByteArray# arr s of { (# s, arr #) -> (# s, BA arr #) } writeByteArray :: MutableByteArray# RealWorld -> Int# -> Word8 -> IO () writeByteArray arr i (W8# w) = IO $ \s -> case writeWord8Array# arr i w s of { s -> (# s, () #) } indexByteArray a# n# = W8# (indexWord8Array# a# n#) instance (Integral a, Binary a) => Binary (Ratio a) where put_ bh (a :% b) = do put_ bh a; put_ bh b get bh = do a <- get bh; b <- get bh; return (a :% b) -- #endif instance Binary (Bin a) where put_ bh (BinPtr i) = put_ bh i get bh = do i <- get bh; return (BinPtr i) -- ----------------------------------------------------------------------------- -- Lazy reading/writing lazyPut :: Binary a => BinHandle -> a -> IO () lazyPut bh a = do -- output the obj with a ptr to skip over it: pre_a <- tellBin bh put_ bh pre_a -- save a slot for the ptr put_ bh a -- dump the object q <- tellBin bh -- q = ptr to after object putAt bh pre_a q -- fill in slot before a with ptr to q seekBin bh q -- finally carry on writing at q lazyGet :: Binary a => BinHandle -> IO a lazyGet bh = do p <- get bh -- a BinPtr p_a <- tellBin bh a <- unsafeInterleaveIO (getAt bh p_a) seekBin bh p -- skip over the object for now return a {- --------------------------------------------------------- -- Reading and writing FastStrings --------------------------------------------------------- putFS 
bh (FastString id l ba) = do put_ bh (I# l) putByteArray bh ba l putFS bh s = error ("Binary.put_(FastString): " ++ unpackFS s) -- Note: the length of the FastString is *not* the same as -- the size of the ByteArray: the latter is rounded up to a -- multiple of the word size. {- -- possible faster version, not quite there yet: getFS bh@BinMem{} = do (I# l) <- get bh arr <- readIORef (arr_r bh) off <- readFastMutInt (off_r bh) return $! (mkFastSubStringBA# arr off l) -} getFS bh = do (I# l) <- get bh (BA ba) <- getByteArray bh (I# l) return $! (mkFastSubStringBA# ba 0# l) {- instance Binary FastString where put_ bh f@(FastString id l ba) = case getUserData bh of { UserData { ud_next = j_r, ud_map = out_r, ud_dict = dict} -> do out <- readIORef out_r let uniq = getUnique f case lookupUFM out uniq of Just (j,f) -> put_ bh j Nothing -> do j <- readIORef j_r put_ bh j writeIORef j_r (j+1) writeIORef out_r (addToUFM out uniq (j,f)) } put_ bh s = error ("Binary.put_(FastString): " ++ show (unpackFS s)) get bh = do j <- get bh return $! (ud_dict (getUserData bh) ! j) -} -} {- instance Binary Atom where get bh = do ps <- get bh a <- fromPackedStringIO ps return a put_ bh a = put_ bh (toPackedString a) -} DrIFT-2.2.3/README0000644000076400007640000000307410753606147010254 00000000000000------------------------------------------------------------------------------ DrIFT This package contains a source distribution of DrIFT, a tool for automatic derivation of Haskell class instances. DrIFT was formerly known as Derive. the current homepage is at http://repetae.net/john/computer/haskell/DrIFT/ ------------------------------------------------------------------------------ Contents of this package: - src Directory with the source files of the DrIFT tool - example Directory with examples of using the DrIFT tool - docs Documentation ------------------------------------------------------------------------------ To configure and install DrIFT from the source tarball ./configure --prefix= make install Where is the directory in which you want to install the tool. Note that a haskell compiler (nhc98 or ghc) must be in your path. Otherwise, supply the location of a Haskell compiler as follows: ./configure --prefix= --with-hc= make install If you have pulled the source from the darcs repository, you must first build the autotools scripts by running autoreconf -i ------------------------------------------------------------------------------ You may optionally sset DERIVEPATH to the list of directories you wish to search for modules. as an example: DERIVEPATH=/users/grad/nww/share/hugs/lib:/users/grad/nww/share/hugs/lib/hugs to run DrIFT, simply pass the haskell file name as an argument. 
you may run DrIFT --help to get a summary of the various command line options DrIFT-2.2.3/drift-ghc.in0000644000076400007640000000067610753606147011600 00000000000000#!/bin/sh if [ -z "$1" -o -z "$2" -o -z "$3" ]; then cat < "$3" cat "$2" >> "$3"; fi DrIFT-2.2.3/DrIFT.spec.in0000644000076400007640000000236310753606147011565 00000000000000 %define version @VERSION@ %define release 1 %define prefix /usr %define progname DrIFT Summary: DeRiving Instances From Types Name: DrIFT Version: %{version} Release: %{release} License: LICENSE Group: Applications/Development BuildRoot: /var/tmp/%{progname}-%{version}-%{release}-buildroot Source: http://repetae.net/john/computer/haskell/%{progname}/drop/%{progname}-%{version}.tar.gz URL: http://repetae.net/john/computer/haskell/%{progname}/ Packager: John Meacham Prefix: %{prefix} %description This package contains a source distribution of DrIFT, a tool for automatic derivation of Haskell class instances. DrIFT was formerly known as Derive. %prep %setup %build ./configure --prefix %{prefix} make make docs %clean #rm -rf $RPM_BUILD_ROOT %install make prefix=$RPM_BUILD_ROOT%{prefix} install rm -f $RPM_BUILD_ROOT%{prefix}/info/dir rm -f $RPM_BUILD_ROOT%{prefix}/lib/debug/usr/bin/DrIFT.debug %files %attr(-, root, root) %doc Changelog %attr(-, root, root) %doc LICENSE %attr(-, root, root) %doc docs/drift.ps %attr(-, root, root) %doc docs/drift.html %attr(-, root, root) %{prefix}/bin/DrIFT %attr(-, root, root) %{prefix}/bin/drift-ghc #%attr(-, root, root) %{prefix}/info/dir %attr(-, root, root) %{prefix}/info/drift.info.gz DrIFT-2.2.3/docs/0000777000076400007640000000000010753607202010375 500000000000000DrIFT-2.2.3/docs/version.texi0000644000076400007640000000014510753606426012700 00000000000000@set UPDATED 10 February 2008 @set UPDATED-MONTH February 2008 @set EDITION 2.2.3 @set VERSION 2.2.3 DrIFT-2.2.3/docs/stamp-vti0000644000076400007640000000014510753606426012167 00000000000000@set UPDATED 10 February 2008 @set UPDATED-MONTH February 2008 @set EDITION 2.2.3 @set VERSION 2.2.3 DrIFT-2.2.3/docs/drift.info0000644000076400007640000007344410753606664012325 00000000000000This is docs/drift.info, produced by makeinfo version 4.11 from ./docs/drift.texi. INFO-DIR-SECTION Haskell Tools START-INFO-DIR-ENTRY * DrIFT: (drift). A type sensitive preprocessor for Haskell 98. END-INFO-DIR-ENTRY  File: drift.info, Node: Top, Next: Introduction, Prev: (dir), Up: (dir) DrIFT ***** DrIFT is a type-sensitive preprocessor for Haskell. It is used to automatically generate code for new defined types. * Menu: * Introduction:: * User Guide:: * Standard Rules:: * User-Defined Rules:: * Installation:: * Bugs::  File: drift.info, Node: Introduction, Next: User Guide, Prev: Top, Up: Top 1 Introduction ************** This is a guide to using DrIFT, a type sensitive preprocessor for Haskell 98. DrIFT is a tool which parses a Haskell module for structures (data & newtype declarations) and commands. These commands cause rules to be fired on the parsed data, generating new code which is then appended to the bottom of the input file, or redirected to another. These rules are expressed as Haskell code, and it is intended that the user can add new rules as required. DrIFT is written in pure Haskell 98, however code it generates is free to make use of extensions when appropriate. DrIFT is currently tested against hugs and ghc. 
* Menu: * What does DrIFT do?:: * Features:: * Motivation:: * An Example::  File: drift.info, Node: What does DrIFT do?, Next: Features, Prev: Introduction, Up: Introduction 1.1 So, What Does DrIFT do? =========================== DrIFT allows derivation of instances for classes that aren't supported by the standard compilers. In addition, instances can be produced in separate modules to that containing the type declaration. This allows instances to be derived for a type after the original module has been compiled. As a bonus, simple utility functions can also be produced for types.  File: drift.info, Node: Features, Next: Motivation, Prev: What does DrIFT do?, Up: Introduction 1.2 Features ============ * DrIFT comes with a set of rules to produce instances for all derivable classes given in the Prelude. There's a rule to produce instances of NFData (the original motivation of all this), and rules for utility functions on types also. The DrIFT implementation is also regularly updated with rules submitted by users. * Code is generated using pretty-printing combinators. This means that the output is (fairly) well formatted, and easy on the human eye. * Effort has been made to make the rule interface as easy to use as possible. This is to allow users to add rules to generate code specific to their own projects. As the rules are written in Haskell themselves, the user doesn't have to learn a new language syntax, and can use all Haskell's features. Currently supported derivations are the following. This list is obtainable by running `DrIFT -l'. Binary: Binary efficient binary encoding of terms GhcBinary byte sized binary encoding of terms Debugging: Observable HOOD observable General: NFData provides 'rnf' to reduce to normal form (deepSeq) Typeable derive Typeable for Dynamic Generics: FunctorM derive reasonable fmapM implementation HFoldable Strafunski hfoldr Monoid derive reasonable Data.Monoid implementation RMapM derive reasonable rmapM implementation Term Strafunski representation via Dynamic Prelude: Bounded Enum Eq Ord Read Show Representation: ATermConvertible encode terms in the ATerm format Haskell2Xml encode terms as XML (HaXml<=1.13) XmlContent encode terms as XML (HaXml>=1.14) Utility: Parse parse values back from standard 'Show' Query provide a QueryFoo class with 'is', 'has', 'from', and 'get' routines from provides fromFoo for each constructor get for label 'foo' provide foo_g to get it has hasfoo for record types is provides isFoo for each constructor test output raw data for testing un provides unFoo for unary constructors update for label 'foo' provides 'foo_u' to update it and foo_s to set it  File: drift.info, Node: Motivation, Next: An Example, Prev: Features, Up: Introduction 1.3 Why Do We Need DrIFT? ========================= The original motivation for DrIFT came from reading one of the Glasgow Parallel Haskell papers on Strategies. Strategies require producing instances of a class which reduces to normal form (called NFData). It was commented that it was a shame that instances of NFData couldn't be automatically derived; the rules to generate the instances are simple, and adding instances by hand is tiresome. Many classes' instances follow simple patterns. This is what makes coding up instances so tedious: there's no thought involved! 
The idea to extend DrIFT to work on imported types came from a discussion of the Haskell mailing list, arising from a point made by Olaf Chitil : Why is the automatic derivation of instances for some standard classes linked to data and newtype declarations? It happened already several times to me that I needed a standard instance of a data type that I imported from a module that did not provide that instance and which I did not want to change (a library; GHC, which I mainly want to extend by further modules, not spread changes over 250 modules). When declaring a new data type one normally avoids deriving (currently) unneeded instances, because it costs program code (and maybe one even wants to enable the user of the module to define his own instances). The third feature of DrIFT, providing utility functions to manipulate new types, especially records was caused by finding oneself writing the same sort of code over and over again. These functions couldn't be captured in a class, but have a similar form for each type they are defined on. A thread on the Haskell mailing list made a related point: untagging and manipulating newtypes was more cumbersome than it should be.  File: drift.info, Node: An Example, Prev: Motivation, Up: Introduction 1.4 An Example ============== Here's an example of what how DrIFT is used. This Haskell module contains commands to the DrIFT preprocessor. These are annotated with `{-! ... !-}'. After processing with DrIFT the generated code is glued on the bottom of the file, beneath a marker indicating where the new code starts. The machine generated code is quite long, and would really have been a drudge to type in by hand. * Menu: * Source Code:: * After processing with DrIFT::  File: drift.info, Node: Source Code, Next: After processing with DrIFT, Prev: An Example, Up: An Example 1.4.1 Source Code ----------------- -- example script for DrIFT module Example where import Foo {-!for Foo derive : Read,NFData !-} -- apply rules to imported type {-! global : is !-} -- global to this module {-!for Data derive : update,Show,Read!-} -- stand alone comand syntax {-!for Maybe derive : NFData !-} -- apply rules to prelude type data Data = D {name :: Name, constraints :: [(Class,Var)], vars :: [Var], body :: [(Constructor,[(Name,Type)])], derive :: [Class], statement :: Statement} data Statement = DataStmt | NewTypeStmt deriving Eq {-!derive : Ord,Show,Read !-} -- abbreviated syntax  File: drift.info, Node: After processing with DrIFT, Prev: Source Code, Up: An Example 1.4.2 After processing with DrIFT --------------------------------- module Example where import Foo {-!for Foo derive : Read,NFData !-} -- apply rules to imported type {-! global : is !-} -- global to this module {-!for Data derive : update,Show,Read!-} -- stand alone comand syntax {-!for Maybe derive : NFData !-} -- apply rules to prelude type data Data = D {name :: Name, constraints :: [(Class,Var)], vars :: [Var], body :: [(Constructor,[(Name,Type)])], derive :: [Class], statement :: Statement} data Statement = DataStmt | NewTypeStmt deriving Eq {-!derive : Ord,Show,Read !-} {-* Generated by DrIFT-v1.0 : Look, but Don't Touch. 
*-} isD (D aa ab ac ad ae af) = True isD _ = False instance Ord Statement where compare DataStmt (DataStmt) = EQ compare DataStmt (NewTypeStmt) = LT compare NewTypeStmt (DataStmt) = GT compare NewTypeStmt (NewTypeStmt) = EQ instance Show Statement where showsPrec d (DataStmt) = showString "DataStmt" showsPrec d (NewTypeStmt) = showString "NewTypeStmt" instance Read Statement where readsPrec d input = (\ inp -> [((DataStmt) , rest) | ("DataStmt" , rest) <- lex inp]) input ++ (\ inp -> [((NewTypeStmt) , rest) | ("NewTypeStmt" , rest) <- lex inp]) input isDataStmt (DataStmt) = True isDataStmt _ = False isNewTypeStmt (NewTypeStmt) = True isNewTypeStmt _ = False instance (NFData a) => NFData (Maybe a) where rnf (Just aa) = rnf aa rnf (Nothing) = () body_u f r@D{body} = r{body = f body} constraints_u f r@D{constraints} = r{constraints = f constraints} derive_u f r@D{derive} = r{derive = f derive} name_u f r@D{name} = r{name = f name} statement_u f r@D{statement} = r{statement = f statement} vars_u f r@D{vars} = r{vars = f vars} body_s v = body_u (const v) constraints_s v = constraints_u (const v) derive_s v = derive_u (const v) name_s v = name_u (const v) statement_s v = statement_u (const v) vars_s v = vars_u (const v) instance Show Data where showsPrec d (D aa ab ac ad ae af) = showParen (d >= 10) (showString "D" . showChar '{' . showString "name" . showChar '=' . showsPrec 10 aa . showChar ',' . showString "constraints" . showChar '=' . showsPrec 10 ab . showChar ',' . showString "vars" . showChar '=' . showsPrec 10 ac . showChar ',' . showString "body" . showChar '=' . showsPrec 10 ad . showChar ',' . showString "derive" . showChar '=' . showsPrec 10 ae . showChar ',' . showString "statement" . showChar '=' . showsPrec 10 af . showChar '}') instance Read Data where readsPrec d input = readParen (d > 9) (\ inp -> [((D aa ab ac ad ae af) , rest) | ("D" , inp) <- lex inp , ("{" , inp) <- lex inp , ("name" , inp) <- lex inp , ("=" , inp) <- lex inp , (aa , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("constraints" , inp) <- lex inp , ("=" , inp) <- lex inp , (ab , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("vars" , inp) <- lex inp , ("=" , inp) <- lex inp , (ac , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("body" , inp) <- lex inp , ("=" , inp) <- lex inp , (ad , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("derive" , inp) <- lex inp , ("=" , inp) <- lex inp , (ae , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("statement" , inp) <- lex inp , ("=" , inp) <- lex inp , (af , inp) <- readsPrec 10 inp , ("}" , rest) <- lex inp]) input -- Imported from other files :- instance Read Foo where readsPrec d input = (\ inp -> [((Foo) , rest) | ("Foo" , rest) <- lex inp]) input ++ (\ inp -> [((Bar) , rest) | ("Bar" , rest) <- lex inp]) input ++ (\ inp -> [((Bub) , rest) | ("Bub" , rest) <- lex inp]) input instance NFData Foo where rnf (Foo) = () rnf (Bar) = () rnf (Bub) = ()  File: drift.info, Node: User Guide, Next: Standard Rules, Prev: Introduction, Up: Top 2 User Guide ************ This chapter assumes that DrIFT has already been installed and the environment variables set up. The installation is handled in *note Installation::. Briefly, the way DrIFT works is 1. parse the input file, looking for commands and data & newtype statements. 2. generate code by executing the commands, which apply rules to types. 3. if any commands remain unexecuted, this means the types aren't declared in this module, so DrIFT searches for them in imported modules. 4. 
append the generated code to the bottom of the file (overwriting any previously generated code) Rules can be applied to any types defined using a `data' or `newtype' statement. Rules can't be applied to types defined using `type', as this only produces a synonym for a type. *Don't try to use rules on type synonyms.* * Menu: * Command Line:: * Command Syntax:: * Emacs DrIFT mode::  File: drift.info, Node: Command Line, Next: Command Syntax, Prev: User Guide, Up: User Guide 2.1 The Command Line ==================== DrIFT processes standard Haskell scripts (suffix `.hs') and literate scripts (suffix `.lhs'). Currently, only literate code using `>' is accepted: DrIFT doesn't understand the TeX style of literate programming using `\begin{code}'. If you've compiled up an executable from the source code (or are using Runhugs) to run DrIFT over a file type :- `DrIFT FILENAME' Alternatively, for Hugs, use :- `runhugs DrIFT FILENAME' (run DrIFT over filename)  File: drift.info, Node: Command Syntax, Next: Emacs DrIFT mode, Prev: Command Line, Up: User Guide 2.2 Command Syntax ================== Commands to DrIFT are entered into Haskell code in the form of _annotations_. DrIFT's annotations start with `{-!' and finish with `!-}'. (This is so they don't clash with the compiler annotations given to GHC or HBC). There are three forms of command. * *Stand-Alone Command* (syntax : `{-! for TYPE derive : RULE1,RULE2,... !-}') This is the basic form of DrIFT command. It asks DrIFT to apply the listed rules to the specified type. If the type is parameterised, e.g. `Maybe a', just enter the type name into the command, omitting any type variables. DrIFT assumes that types given are currently in scope, and will first search the current module. If it fails to find a matching type definition, the prelude and any imported modules are also searched. This is the only command which allows code to be generated for a type defined in another module. * *Abbreviated Command* (syntax : `{-! derive :RULE1,RULE2,... !-}') This command is appended to the end of a `data' or `newtype' definition, after the deriving clause, if present. It applies the listed rules to the type it is attached to. * *Global Command* (syntax : `{-! global :RULE1,RULE2,... !-}' This command applies the listed rules to all types defined within the module. Note that this command doesn't cause code to be generated for types imported from other modules. For an example of these commands in use, *Note An Example::. 2.2.1 Notes on Using Commands ----------------------------- * The stand-alone and global commands should be entered on a line by themselves, starting in the first column, (as with other top-level declarations, such as `infix', `import',`newtype'). It doesn't matter what position they occur within the module. * In a literate file, all commands should be entered on a `code' line (one starting with `>'). * Commands may be commented out by using `--' and `{- .. -}' in the usual way. * If two commands apply the same rule to a type, then two sets of identical code will be produced. This will cause a `multiple definition' error when the processed module is compiled/interpreted. *Don't do it!*  File: drift.info, Node: Emacs DrIFT mode, Prev: Command Syntax, Up: User Guide 2.3 Emacs DrIFT mode ==================== For Emacs fans, Hans W Loidl has written a script which allows DrIFT to be run within a buffer. The commands available are * `M-x hwl-derive', `C-c d d' runs DrIFT over the current buffer, and then updates the buffer. 
* `M-x hwl-derive-insert-standalone', `C-c d s' inserts a template for a standalone command into the current buffer at the cursor position. * `M-x hwl-derive-insert-local', `C-c d l' inserts a template for an abbreviated command. * `M-x hwl-derive-insert-global', `C-c d g' inserts a template for a global command In `hugs-mode' these functions are also available vie a menu item in the hugs menu.  File: drift.info, Node: Standard Rules, Next: User-Defined Rules, Prev: User Guide, Up: Top 3 Standard Rules **************** Heres a listing of the rules that come pre-defined with DrIFT. If you want a more detailed idea of how they work, their definitions are in the file `StandardRules.hs', and are (fairly) well documented. In the following list the *highlighted* text is the name of the rule, as used in commands. The naming convention for rules is names starting with a capital generate an instance for the class of the same name. Sets of functions are generated by a name beginning with a lower case letter. 3.1 Prelude Classes =================== The classes *Eq*, *Ord*, *Enum*, *Show*, *Read* & *Bounded* are described in the Haskell report as being derivable; DrIFT provides rules for all these. 3.2 Other Classes ================= Originally, *NFData* (for Normal Form evaluation strategies) was the only other class to have a rule. But now, there are rules for many more classes from 3rd-party libraries, e.g. *XmlContent* from HaXml, *Binary* from nhc98, *Term* from Strafunski, *FunctorM* for Generics, *Observable* for HOOD debugging, *Typeable* for dynamics, and so on. For a full list, use the `--list' command-line option. 3.3 Utilities ============= * *un* attempts to make newtypes a little nicer to use by providing an untagging function. This rule can only be used on types defined using `newtype'. For a type `newtype Foo a = F a', *un* produces the function `unFoo :: Foo a -> a'. * *is* produces predicates that indicate the presence of a constructor. This is only useful for multi-constructor datatypes (obviously). For a type `data Foo = Bar | Bub', *is* generates `isBar :: Foo -> Bool' and `isBub :: Foo -> Bool'. * *has* produces predicates that indicate the presence of a label. This can only be used with types where at least one of the constructors is a labelled record. Note that labels can be shared between constructors of the same type. For a type `data Foo a = F{bar :: a,bub :: Int}' *has* generates `hasbar :: Foo a-> Bool' and `hasbub :: Foo a -> Bool'. * *update* produces functions that update fields within a record type. This rule can only be used with a type where at least on of the constructors is a labelled record. For a type `data Foo a = F{bar :: a, bub ::Int}' *update* generates `bar_u :: (a -> a) -> Foo a -> Foo a' and `bub_u :: (Int -> Int) -> Foo a -> Foo a' which apply a function to a field of a record, and then return the updated record. If the value does not have the given field then the value is returned unchanged. `bar_s :: a -> Foo a -> Foo a' and `bub_s ::Int -> Foo a -> Foo a' are also generated, and are used to set the value of a field in a record. * *test* dumps the parsed representation of a datatype to the output. This is be useful for debugging new rules, as the user can see what information is stored about a particular type.  
File: drift.info, Node: User-Defined Rules, Next: Installation, Prev: Standard Rules, Up: Top 4 Rolling Your Own ****************** Programmers who only wish to use the pre-defined rules in DrIFT don't need to read or understand the following section. However, as well as using the supplied rules, users are encouraged to add their own. There is a stub module `UserRules.hs' in the source, to which rules can be added. If a compiled version of DrIFT is being used, the program will then have to be recompiled before the new rules can be used. However, if the Runhugs standalone interpreter is used, this is not necessary. Due to the way Runhugs searches for modules to load, a user may have many copies of the UserRules module. The UserRules module in the current directory will be loaded first. If that is not present, then the `HUGSPATH' environment variable is searched for the module. So it is possible to have a default UserRules module, and specialised ones for particular projects. * Menu: * The Basic Idea:: * How is a Type Represented?:: * Pretty Printing :: * Utilities:: * Adding a new rule::  File: drift.info, Node: The Basic Idea, Next: How is a Type Represented?, Prev: User-Defined Rules, Up: User-Defined Rules 4.1 The Basic Idea ================== A rule is a tuple containing a string and a function. The string is the name of the rule, and is used in commands in an input file. The function maps between the abstract representation of a datatype and text to be output (A sort of un-parser, if you like). The best way to understand this is to have a look at the existing rules in `StandardRules.hs'. This module is quite well documented.  File: drift.info, Node: How is a Type Represented?, Next: Pretty Printing, Prev: The Basic Idea, Up: User-Defined Rules 4.2 How is a Type Represented? ============================== A type is represented within DrIFT using the following data definition. >data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) >data Data = D { name :: Name, -- type name > constraints :: [(Class,Var)], > vars :: [Var], -- Parameters > body :: [Body], > derives :: [Class], -- derived classes > statement :: Statement} > | Directive > | TypeName Name deriving (Eq,Show) >type Name = String >type Var = String >type Class = String A `Data' type represents one parsed `data' or `newtype' statement. These are held in a `D' constructor record (the `Directive' and `TypeName' constructors are just used internally by DrIFT). We'll now examine each of the fields in turn. * `name' holds the name of the new datatype as a string. * `constraints' list the type constraints for the type variables of the new type. e.g. for `data (Eq a) => Foo a = F a', the value of `constraints' would be `[("Eq","a")]'. * `vars' contains a list of the type variables in the type. For the previous example, this would simply be `["a"]' . * `body' is a list of the constructors of the type, and the information associated with them. We'll come back to this in a moment. * `derives' lists the classes that the type an instance of though using the `deriving' clause. * `statement' indicates whether the type was declared using a `newtype' or `data' statement 4.2.1 The Body -------------- >data Body = Body { constructor :: Constructor, > labels :: [Name], > types :: [Type]} deriving (Eq,Show) >type Constructor = String The body type holds information about one of the constructors of a type. `constructor' is self-explanatory. `labels' holds the names of labels of a record. This will be blank if the constructor isn't a record. 
`types' contains a representation of the type of each value within the constructor. The definition of `Type' is as follows. >data Type = Arrow Type Type -- fn > | Apply Type Type -- application > | Var String -- variable > | Con String -- constructor > | Tuple [Type] -- tuple > | List Type -- list > deriving (Eq,Show) Few of the deriving rules supplied have actually needed to use this type information, which I found quite surprising. If you do find you need to use it, one example is the Haskell2Xml rule.  File: drift.info, Node: Pretty Printing, Next: Utilities, Prev: How is a Type Represented?, Up: User-Defined Rules 4.3 Pretty Printing =================== Instead of producing a string as output, rules produce a value of type `Doc'. This type is defined in the Pretty Printing Library implemented by Simon Peyton-Jones. The pretty printer ensures that the code is formatted for readability, and also handles problems such as indentation. Constructing output using pretty printing combinators is easier and more structured than manipulating strings too. For those unfamiliar with these combinators, have a look at the module `Pretty.lhs' and the web page `http://www.cse.ogi.edu/~simonpj/' or for more detail the paper `The Design of a Pretty Printing Library, J. Hughes'  File: drift.info, Node: Utilities, Next: Adding a new rule, Prev: Pretty Printing, Up: User-Defined Rules 4.4 Utilities ============= Upon the pretty printing library, DrIFT defines some more formatting functions which make regularly occurring structures of code easier to write. These structures include simple instances, blocks of code, lists, etc. The utilities are in the module `RuleUtils.hs' and should be self explanatory.  File: drift.info, Node: Adding a new rule, Prev: Utilities, Up: User-Defined Rules 4.5 Adding a new rule ===================== A rule has type `type Rule = (String,Data -> Doc)'. Once you have written your mapping function and chosen an appropriate name for the rule, add this tuple to the list `userRules :: [Rule]' in module `UserRules.hs'. Recompile if necessary. DrIFT will then call this rule when its name occurs in a command in an input file.  File: drift.info, Node: Installation, Next: Bugs, Prev: User-Defined Rules, Up: Top 5 Installation ************** DrIFT isn't a large or complicated application, so it shouldn't be too hard for anyone to get it up and running. For the platform you want to install for, read the corresponding section below, then see *note Environment Variables:: * Menu: * GHC:: * Hugs:: * Runhugs:: * Environment Variables:: * Installing the Emacs DrIFT Mode::  File: drift.info, Node: GHC, Next: Hugs, Prev: Installation, Up: Installation 5.1 GHC ======= the automake script should automatically detect any ghc or nhc installation and use that to build and install DrIFT. First run `./configure' . To compile, type `make all'. The executable produced `DrIFT' can then be installed with `make install'.  File: drift.info, Node: Hugs, Next: Runhugs, Prev: GHC, Up: Installation 5.2 Hugs ======== The DrIFT code comes as a set of Haskell modules. You want to copy all these to somewhere in your `HUGSPATH', then you can load and run DrIFT in any directory.  File: drift.info, Node: Runhugs, Next: Environment Variables, Prev: Hugs, Up: Installation 5.3 Runhugs =========== Edit the first line of the the file `DrIFT' to point to your copy of `runhugs'. 
Copy `DrIFT' to somewhere on your `PATH', and the remainder of the source (`*.hs',`*.lhs') to a directory in your `HUGSPATH'  File: drift.info, Node: Environment Variables, Next: Installing the Emacs DrIFT Mode, Prev: Runhugs, Up: Installation 5.4 Environment Variables ========================= In you environment set `DERIVEPATH' to the list of directories you wish derive to search for modules / interfaces. `DERIVEPATH' is quite fussy about the format the list should take :- * each path should be separated by ':' * no space inserted anywhere * no final '/' on the end of a path For instance good - `/users/nww/share/hugs/lib:/users/nww/share/hugs/lib/hugs' bad - `/users/nww/share/hugs/lib/: /users/nww/share/hugs/lib/hugs/'  File: drift.info, Node: Installing the Emacs DrIFT Mode, Prev: Environment Variables, Up: Installation 5.5 Installing the Emacs DrIFT Mode =================================== Edit `derive.el' so that the variable `hwl-derive-cmd' contains your copy of the DrIFT executable. Place `derive.el' into a directory on your `load-path', byte-compile it and put the following command into your `.emacs' file: `(load "derive")'  File: drift.info, Node: Bugs, Prev: Installation, Up: Top 6 Bugs and Shortcomings *********************** * DrIFT doesn't check for commands applying the same rule to a type. * No support for TeX-style literate code.  Tag Table: Node: Top220 Node: Introduction541 Node: What does DrIFT do?1355 Node: Features1879 Node: Motivation4392 Node: An Example6314 Node: Source Code6872 Node: After processing with DrIFT7693 Node: User Guide12794 Node: Command Line13817 Node: Command Syntax14423 Node: Emacs DrIFT mode16814 Node: Standard Rules17630 Node: User-Defined Rules20819 Node: The Basic Idea21945 Node: How is a Type Represented?22507 Node: Pretty Printing25404 Node: Utilities26187 Node: Adding a new rule26627 Node: Installation27087 Node: GHC27544 Node: Hugs27897 Node: Runhugs28158 Node: Environment Variables28488 Node: Installing the Emacs DrIFT Mode29130 Node: Bugs29562  End Tag Table DrIFT-2.2.3/docs/drift.html0000644000076400007640000022544110753606663012331 00000000000000 DrIFT User Guide

1. Introduction

This is a guide to using DrIFT, a type-sensitive preprocessor for Haskell 98.

DrIFT is a tool which parses a Haskell module for structures (data & newtype declarations) and commands. These commands cause rules to be fired on the parsed data, generating new code which is then appended to the bottom of the input file, or redirected to another file. These rules are expressed as Haskell code, and it is intended that the user can add new rules as required.

DrIFT is written in pure Haskell 98; however, the code it generates is free to make use of extensions when appropriate. DrIFT is currently tested against hugs and ghc.



1.1 So, What Does DrIFT do?

DrIFT allows derivation of instances for classes that aren't supported by the standard compilers. In addition, instances can be produced in modules separate from the one containing the type declaration. This allows instances to be derived for a type after the original module has been compiled. As a bonus, simple utility functions can also be produced for types.
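
As a quick sketch of what this looks like in practice, a single annotation in the importing module is enough to request extra instances for a type defined elsewhere (Foo here is the imported type reused in the full example of section 1.4):

{-!for Foo derive : Read,NFData !-}  -- derive for an imported type

DrIFT locates the definition of Foo in the imported module and appends the generated Read and NFData instances to the processed output.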



1.2 Features

  • DrIFT comes with a set of rules to produce instances for all derivable classes given in the Prelude. There's a rule to produce instances of NFData (the original motivation of all this), and rules for utility functions on types also. The DrIFT implementation is also regularly updated with rules submitted by users.
  • Code is generated using pretty-printing combinators. This means that the output is (fairly) well formatted, and easy on the human eye.
  • Effort has been made to make the rule interface as easy to use as possible. This is to allow users to add rules to generate code specific to their own projects. As the rules are written in Haskell themselves, the user doesn't have to learn a new language syntax, and can use all Haskell's features.

Currently supported derivations are the following. This list is obtainable by running DrIFT -l.

Binary:
   Binary            efficient binary encoding of terms
   GhcBinary         byte sized binary encoding of terms
Debugging:
   Observable        HOOD observable
General:
   NFData            provides 'rnf' to reduce to normal form (deepSeq)
   Typeable          derive Typeable for Dynamic
Generics:
   FunctorM          derive reasonable fmapM implementation
   HFoldable         Strafunski hfoldr
   Monoid            derive reasonable Data.Monoid implementation
   RMapM             derive reasonable rmapM implementation
   Term              Strafunski representation via Dynamic
Prelude:
   Bounded
   Enum
   Eq
   Ord
   Read
   Show
Representation:
   ATermConvertible  encode terms in the ATerm format
   Haskell2Xml       encode terms as XML (HaXml<=1.13)
   XmlContent        encode terms as XML (HaXml>=1.14)
Utility:
   Parse             parse values back from standard 'Show'
   Query             provide a QueryFoo class with 'is', 'has',
                       'from', and 'get' routines
   from              provides fromFoo for each constructor
   get               for label 'foo' provide foo_g to get it
   has               hasfoo for record types
   is                provides isFoo for each constructor
   test              output raw data for testing
   un                provides unFoo for unary constructors
   update            for label 'foo' provides 'foo_u' to update it
                       and foo_s to set it
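
For instance, one of the utility rules from this list can be requested by attaching an abbreviated command to a type declaration. A minimal sketch for the un rule follows; the generated name and signature are as documented in the Standard Rules chapter, and the body shown is the obvious untagging definition:

newtype Foo a = F a  {-!derive : un !-}

-- DrIFT appends an untagging function along these lines:
unFoo :: Foo a -> a
unFoo (F a) = a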



1.3 Why Do We Need DrIFT?

The original motivation for DrIFT came from reading one of the Glasgow Parallel Haskell papers on Strategies. Strategies require producing instances of a class which reduces to normal form (called NFData). It was commented that it was a shame that instances of NFData couldn't be automatically derived; the rules to generate the instances are simple, and adding instances by hand is tiresome. Many classes' instances follow simple patterns. This is what makes coding up instances so tedious: there's no thought involved!

The idea to extend DrIFT to work on imported types came from a discussion on the Haskell mailing list, arising from a point made by Olaf Chitil:

Why is the automatic derivation of instances for some standard classes linked to data and newtype declarations? It happened already several times to me that I needed a standard instance of a data type that I imported from a module that did not provide that instance and which I did not want to change (a library; GHC, which I mainly want to extend by further modules, not spread changes over 250 modules). When declaring a new data type one normally avoids deriving (currently) unneeded instances, because it costs program code (and maybe one even wants to enable the user of the module to define his own instances).

The third feature of DrIFT, providing utility functions to manipulate new types (especially records), arose from finding oneself writing the same sort of code over and over again. These functions couldn't be captured in a class, but have a similar form for each type they are defined on. A thread on the Haskell mailing list made a related point: untagging and manipulating newtypes was more cumbersome than it should be.



1.4 An Example

Here's an example of how DrIFT is used. This Haskell module contains commands to the DrIFT preprocessor. These are annotated with {-! ... !-}. After processing with DrIFT, the generated code is glued onto the bottom of the file, beneath a marker indicating where the new code starts. The machine-generated code is quite long, and would really have been a drudge to type in by hand.



1.4.1 Source Code

 
-- example script for DrIFT

module Example where
import Foo
{-!for Foo derive :  Read,NFData !-} -- apply rules to imported type

{-! global : is !-} -- global to this module
{-!for Data derive : update,Show,Read!-} -- stand-alone command syntax

{-!for Maybe derive : NFData !-} -- apply rules to prelude type

data Data = D {name :: Name,
			constraints :: [(Class,Var)],
			vars :: [Var],
			body :: [(Constructor,[(Name,Type)])],
			derive :: [Class],
			statement :: Statement}

data Statement = DataStmt | NewTypeStmt
        deriving Eq {-!derive : Ord,Show,Read !-} -- abbreviated syntax


1.4.2 After processing with DrIFT

 
module Example where
import Foo
{-!for Foo derive : Read,NFData !-} -- apply rules to imported type

{-! global : is !-} -- global to this module
{-!for Data derive : update,Show,Read!-} -- stand-alone command syntax

{-!for Maybe derive : NFData !-} -- apply rules to prelude type

data Data = D {name :: Name,
                        constraints :: [(Class,Var)],
                        vars :: [Var],
                        body :: [(Constructor,[(Name,Type)])],
                        derive :: [Class],
                        statement :: Statement}

data Statement = DataStmt | NewTypeStmt
        deriving Eq {-!derive : Ord,Show,Read !-}

{-* Generated by DrIFT-v1.0 : Look, but Don't Touch. *-}
isD (D aa ab ac ad ae af) = True
isD _ = False

instance Ord Statement where
    compare DataStmt (DataStmt) = EQ
    compare DataStmt (NewTypeStmt) = LT
    compare NewTypeStmt (DataStmt) = GT
    compare NewTypeStmt (NewTypeStmt) = EQ

instance Show Statement where
    showsPrec d (DataStmt) = showString "DataStmt"
    showsPrec d (NewTypeStmt) = showString "NewTypeStmt"

instance Read Statement where
    readsPrec d input =
              (\ inp -> [((DataStmt) , rest)
                          | ("DataStmt" , rest) <- lex inp])
              input
              ++
              (\ inp ->
               [((NewTypeStmt) , rest)
                  | ("NewTypeStmt" , rest) <- lex inp])
              input

isDataStmt (DataStmt) = True
isDataStmt _ = False
isNewTypeStmt (NewTypeStmt) = True
isNewTypeStmt _ = False

instance (NFData a) => NFData (Maybe a) where
    rnf (Just aa) = rnf aa
    rnf (Nothing) = ()

body_u f r@D{body} = r{body = f body}
constraints_u f r@D{constraints} = r{constraints = f constraints}
derive_u f r@D{derive} = r{derive = f derive}
name_u f r@D{name} = r{name = f name}
statement_u f r@D{statement} = r{statement = f statement}
vars_u f r@D{vars} = r{vars = f vars}
body_s v =  body_u  (const v)
constraints_s v =  constraints_u  (const v)
derive_s v =  derive_u  (const v)
name_s v =  name_u  (const v)
statement_s v =  statement_u  (const v)
vars_s v =  vars_u  (const v)

instance Show Data where
    showsPrec d (D aa ab ac ad ae af) = showParen (d >= 10)
              (showString "D" . showChar '{' .
               showString "name" . showChar '=' . showsPrec 10 aa
               . showChar ',' .
               showString "constraints" . showChar '=' . showsPrec 10 ab
               . showChar ',' .
               showString "vars" . showChar '=' . showsPrec 10 ac
               . showChar ',' .
               showString "body" . showChar '=' . showsPrec 10 ad
               . showChar ',' .
               showString "derive" . showChar '=' . showsPrec 10 ae
               . showChar ',' .
               showString "statement" . showChar '=' . showsPrec 10 af
               . showChar '}')

instance Read Data where
    readsPrec d input =
          readParen (d > 9)
           (\ inp ->
            [((D aa ab ac ad ae af) , rest) | ("D" , inp) <- lex inp ,
             ("{" , inp) <- lex inp , ("name" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (aa , inp) <- readsPrec 10 inp ,
             ("," , inp) <- lex inp , ("constraints" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (ab , inp) <- readsPrec 10 inp ,
             ("," , inp) <- lex inp , ("vars" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (ac , inp) <- readsPrec 10 inp ,
             ("," , inp) <- lex inp , ("body" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (ad , inp) <- readsPrec 10 inp ,
             ("," , inp) <- lex inp , ("derive" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (ae , inp) <- readsPrec 10 inp ,
             ("," , inp) <- lex inp , ("statement" , inp) <- lex inp ,
             ("=" , inp) <- lex inp , (af , inp) <- readsPrec 10 inp ,
             ("}" , rest) <- lex inp])
           input

--  Imported from other files :-

instance Read Foo where
    readsPrec d input =
              (\ inp -> [((Foo) , rest)
                          | ("Foo" , rest) <- lex inp]) input
              ++
              (\ inp -> [((Bar) , rest)
                          | ("Bar" , rest) <- lex inp]) input
              ++
              (\ inp -> [((Bub) , rest)
                          | ("Bub" , rest) <- lex inp]) input

instance NFData Foo where
    rnf (Foo) = ()
    rnf (Bar) = ()
    rnf (Bub) = ()



2. User Guide

This chapter assumes that DrIFT has already been installed and the environment variables set up. Installation is covered in the Installation chapter.

Briefly, the way DrIFT works is

  1. parse the input file, looking for commands and data & newtype statements.
  2. generate code by executing the commands, which apply rules to types.
  3. if any commands remain unexecuted, this means the types aren't declared in this module, so DrIFT searches for them in imported modules.
  4. append the generated code to the bottom of the file (overwriting any previously generated code)

Rules can be applied to any types defined using a data or newtype statement. Rules can't be applied to types defined using type, as this only produces a synonym for a type. Don't try to use rules on type synonyms.
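
For instance, in the following sketch (the type and rule names here are only illustrative) the data and newtype declarations may carry DrIFT annotations, while the type synonym may not:

module SynonymSketch where

newtype Name = N String {-!derive : un !-}            -- fine: newtype
data Colour  = Red | Green | Blue {-!derive : is !-}  -- fine: data

type Phone = String   -- only a synonym for String; DrIFT rules
                      -- cannot be applied to it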



2.1 The Command Line

DrIFT processes standard Haskell scripts (suffix ‘.hs’) and literate scripts (suffix ‘.lhs’). Currently, only literate code using > is accepted: DrIFT doesn't understand the TeX style of literate programming using \begin{code}.

If you've compiled an executable from the source code (or are using the Runhugs script), run DrIFT over a file by typing :-

DrIFT filename

Alternatively, for Hugs, use :-

runhugs DrIFT filename (run DrIFT over filename)



2.2 Command Syntax

Commands to DrIFT are entered into Haskell code in the form of annotations. DrIFT's annotations start with {-! and finish with !-}. (This is so they don't clash with the compiler annotations given to GHC or HBC). There are three forms of command.

  • Stand–Alone Command (syntax : {-! for type derive : rule1,rule2,… !-}) This is the basic form of DrIFT command. It asks DrIFT to apply the listed rules to the specified type. If the type is parameterised, e.g. Maybe a, just enter the type name into the command, omitting any type variables. DrIFT assumes that types given are currently in scope, and will first search the current module. If it fails to find a matching type definition, the prelude and any imported modules are also searched. This is the only command which allows code to be generated for a type defined in another module.
  • Abbreviated Command (syntax : {-! derive :rule1,rule2,… !-}) This command is appended to the end of a data or newtype definition, after the deriving clause, if present. It applies the listed rules to the type it is attached to.
  • Global Command (syntax : {-! global :rule1,rule2,… !-}) This command applies the listed rules to all types defined within the module. Note that this command doesn't cause code to be generated for types imported from other modules.

For an example of these commands in use, see the section An Example.



2.2.1 Notes on Using Commands

  • The stand-alone and global commands should be entered on a line by themselves, starting in the first column (as with other top-level declarations, such as infix, import, newtype). It doesn't matter where they occur within the module.
  • In a literate file, all commands should be entered on a `code' line (one starting with >).
  • Commands may be commented out by using -- and {- .. -} in the usual way.
  • If two commands apply the same rule to a type, then two sets of identical code will be produced. This will cause a `multiple definition' error when the processed module is compiled/interpreted. Don't do it!


2.3 Emacs DrIFT mode

For Emacs fans, Hans W Loidl hwloidl@dcs.gla.ac.uk has written a script which allows DrIFT to be run within a buffer.

The commands available are

  • M-x hwl-derive, C-c d d runs DrIFT over the current buffer, and then updates the buffer.
  • M-x hwl-derive-insert-standalone, C-c d s inserts a template for a standalone command into the current buffer at the cursor position.
  • M-x hwl-derive-insert-local, C-c d l inserts a template for an abbreviated command.
  • M-x hwl-derive-insert-global, C-c d g inserts a template for a global command

In `hugs-mode' these functions are also available via a menu item in the hugs menu.



3. Standard Rules

Here's a listing of the rules that come pre-defined with DrIFT. If you want a more detailed idea of how they work, their definitions are in the file ‘StandardRules.hs’, and are (fairly) well documented. In the following list the highlighted text is the name of the rule, as used in commands. The naming convention for rules is that names starting with a capital letter generate an instance of the class of the same name, while sets of functions are generated by names beginning with a lower-case letter.



3.1 Prelude Classes

The classes Eq, Ord, Enum, Show, Read & Bounded are described in the Haskell report as being derivable; DrIFT provides rules for all these.



3.2 Other Classes

Originally, NFData (for Normal Form evaluation strategies) was the only other class to have a rule. But now, there are rules for many more classes from 3rd-party libraries, e.g. XmlContent from HaXml, Binary from nhc98, Term from Strafunski, FunctorM for Generics, Observable for HOOD debugging, Typeable for dynamics, and so on. For a full list, use the --list command-line option.



3.3 Utilities

  • un attempts to make newtypes a little nicer to use by providing an untagging function. This rule can only be used on types defined using newtype.

    For a type newtype Foo a = F a,

    un produces the function unFoo :: Foo a -> a.

  • is produces predicates that indicate the presence of a constructor. This is only useful for multi-constructor datatypes (obviously).

    For a type data Foo = Bar | Bub, is generates

    isBar :: Foo -> Bool and isBub :: Foo -> Bool.

  • has produces predicates that indicate the presence of a label. This can only be used with types where at least one of the constructors is a labelled record. Note that labels can be shared between constructors of the same type.

    For a type data Foo a = F{bar :: a,bub :: Int} has generates

    hasbar :: Foo a -> Bool and hasbub :: Foo a -> Bool.

  • update produces functions that update fields within a record type. This rule can only be used with a type where at least one of the constructors is a labelled record.

    For a type data Foo a = F{bar :: a, bub ::Int} update generates

    bar_u :: (a -> a) -> Foo a -> Foo a and

    bub_u :: (Int -> Int) -> Foo a -> Foo a which apply a function to a field of a record, and then return the updated record. If the value does not have the given field then the value is returned unchanged.

    bar_s :: a -> Foo a -> Foo a and bub_s ::Int -> Foo a -> Foo a are also generated, and are used to set the value of a field in a record.

  • test dumps the parsed representation of a datatype to the output. This is useful for debugging new rules, as the user can see what information is stored about a particular type. (A hand-written sketch of the kind of code the utility rules above generate follows this list.)
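
Here is a hand-written sketch of the code the un, is, has and update rules would generate for two small illustrative types; the real DrIFT output may differ slightly in layout:

module UtilitySketch where

newtype Tag = T Int                          -- {-!derive : un !-}
data Foo a  = F {bar :: a, bub :: Int} | G   -- {-!derive : is,has,update !-}

unTag :: Tag -> Int                  -- from 'un'
unTag (T x) = x

isF, isG :: Foo a -> Bool            -- from 'is'
isF F{} = True
isF _   = False
isG G   = True
isG _   = False

hasbar :: Foo a -> Bool              -- from 'has'
hasbar F{} = True
hasbar _   = False

bar_u :: (a -> a) -> Foo a -> Foo a  -- from 'update'
bar_u f r@F{} = r{bar = f (bar r)}
bar_u _ r     = r

bar_s :: a -> Foo a -> Foo a         -- from 'update'
bar_s v = bar_u (const v)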


4. Rolling Your Own

Programmers who only wish to use the pre-defined rules in DrIFT don't need to read or understand the following section. However, as well as using the supplied rules, users are encouraged to add their own. There is a stub module ‘UserRules.hs’ in the source, to which rules can be added.

If a compiled version of DrIFT is being used, the program will then have to be recompiled before the new rules can be used. However, if the Runhugs standalone interpreter is used, this is not necessary. Due to the way Runhugs searches for modules to load, a user may have many copies of the UserRules module. The UserRules module in the current directory will be loaded first. If that is not present, then the HUGSPATH environment variable is searched for the module. So it is possible to have a default UserRules module, and specialised ones for particular projects.



4.1 The Basic Idea

A rule is a tuple containing a string and a function. The string is the name of the rule, and is used in commands in an input file. The function maps between the abstract representation of a datatype and text to be output (A sort of un-parser, if you like). The best way to understand this is to have a look at the existing rules in ‘StandardRules.hs’. This module is quite well documented.



4.2 How is a Type Represented?

A type is represented within DrIFT using the following data definition.

 
>data Statement = DataStmt | NewTypeStmt deriving (Eq,Show)

>data Data = D {        name :: Name,           -- type name
>                       constraints :: [(Class,Var)],
>                       vars :: [Var],          -- Parameters
>                       body :: [Body],
>                       derives :: [Class],     -- derived classes
>                       statement :: Statement}
>          | Directive
>          | TypeName Name deriving (Eq,Show)

>type Name = String
>type Var = String
>type Class = String

A Data type represents one parsed data or newtype statement. These are held in a D constructor record (the Directive and TypeName constructors are just used internally by DrIFT). We'll now examine each of the fields in turn.

  • name holds the name of the new datatype as a string.
  • constraints lists the type constraints for the type variables of the new type. E.g. for data (Eq a) => Foo a = F a, the value of constraints would be [("Eq","a")].
  • vars contains a list of the type variables in the type. For the previous example, this would simply be ["a"] .
  • body is a list of the constructors of the type, and the information associated with them. We'll come back to this in a moment.
  • derives lists the classes that the type is made an instance of through the deriving clause.
  • statement indicates whether the type was declared using a newtype or data statement.


4.2.1 The Body

 
>data Body = Body { constructor :: Constructor,
>                   labels :: [Name],
>                   types :: [Type]} deriving (Eq,Show)

>type Constructor = String

The body type holds information about one of the constructors of a type. constructor is self-explanatory. labels holds the names of labels of a record. This will be blank if the constructor isn't a record. types contains a representation of the type of each value within the constructor. The definition of Type is as follows.

 
>data Type      = Arrow Type Type -- fn
>               | Apply Type Type -- application
>               | Var String      -- variable
>               | Con String      -- constructor
>               | Tuple [Type]    -- tuple
>               | List Type       -- list
>               deriving (Eq,Show)

Few of the deriving rules supplied have actually needed to use this type information, which I found quite surprising. If you do find you need to use it, one example is the Haskell2Xml rule.
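
As a concrete, hand-constructed illustration (assuming the Data, Body and Type definitions above are in scope; the exact value is inferred from the description, not taken from DrIFT's output), the declaration data (Eq a) => Foo a = F a | G deriving Show would be represented roughly as:

fooRep :: Data
fooRep = D { name        = "Foo"
           , constraints = [("Eq","a")]
           , vars        = ["a"]
           , body        = [ Body { constructor = "F"
                                  , labels      = []
                                  , types       = [Var "a"] }
                           , Body { constructor = "G"
                                  , labels      = []
                                  , types       = [] } ]
           , derives     = ["Show"]
           , statement   = DataStmt }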



4.3 Pretty Printing

Instead of producing a string as output, rules produce a value of type Doc. This type is defined in the Pretty Printing Library implemented by Simon Peyton Jones. The pretty printer ensures that the code is formatted for readability, and also handles problems such as indentation. Constructing output using pretty-printing combinators is also easier and more structured than manipulating strings. For those unfamiliar with these combinators, have a look at the module ‘Pretty.lhs’ and the web page http://www.cse.ogi.edu/~simonpj/ or, for more detail, the paper The Design of a Pretty Printing Library by J. Hughes.
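
As a small sketch (assuming the bundled Pretty module exports the usual combinators text, <+>, $$, nest and render from this library), a rule body builds a Doc rather than concatenating strings:

import Pretty

-- Lay out the head of an instance declaration with an indented body.
instanceSkeleton :: String -> String -> Doc
instanceSkeleton cls ty =
      text "instance" <+> text cls <+> text ty <+> text "where"
   $$ nest 4 (text "-- method definitions go here")

-- render (instanceSkeleton "Show" "Foo") yields:
--   instance Show Foo where
--       -- method definitions go here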



4.4 Utilities

On top of the pretty-printing library, DrIFT defines some further formatting functions which make regularly occurring code structures easier to write. These structures include simple instances, blocks of code, lists, etc. The utilities are in the module ‘RuleUtils.hs’ and should be self-explanatory.
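
The real helpers live in ‘RuleUtils.hs’; purely as an illustration of the idea (the name blockOf is invented here and is not the actual RuleUtils API), such a utility might look like:

import Pretty

-- A header followed by an indented block of lines: the shape of most
-- generated instances and function groups.
blockOf :: Doc -> [Doc] -> Doc
blockOf header body = header $$ nest 4 (vcat body)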



4.5 Adding a new rule

A rule has the type Rule, defined as type Rule = (String,Data -> Doc). Once you have written your mapping function and chosen an appropriate name for the rule, add the resulting tuple to the list userRules :: [Rule] in module ‘UserRules.hs’. Recompile if necessary. DrIFT will then call this rule when its name occurs in a command in an input file.
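
As an end-to-end sketch under the assumptions above (Rule, Data and Body in scope, e.g. via the RuleUtils module, and the Pretty combinators available; the rule name conNames and function conNamesFn are invented for this example), a complete user rule might look like:

import Data.Char (toLower)
import Pretty

-- For  data Foo = Bar | Bub  this rule would emit:
--   fooConNames :: [String]
--   fooConNames = ["Bar","Bub"]
conNamesRule :: Rule
conNamesRule = ("conNames", conNamesFn)

conNamesFn :: Data -> Doc
conNamesFn d =
      text fn <+> text ":: [String]"
   $$ text fn <+> text "=" <+> text (show (map constructor (body d)))
  where
    fn = case name d of
           (c:cs) -> toLower c : cs ++ "ConNames"
           []     -> "conNames"

Adding conNamesRule to userRules and recompiling would then make a command such as {-!for Foo derive : conNames !-} available.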



5. Installation

DrIFT isn't a large or complicated application, so it shouldn't be too hard for anyone to get it up and running. Read the section below corresponding to your platform, then see Environment Variables.



5.1 GHC

The configure script should automatically detect any GHC or nhc98 installation and use it to build and install DrIFT. First run ./configure. To compile, type make all. The resulting executable, ‘DrIFT’, can then be installed with make install.



5.2 Hugs

The DrIFT code comes as a set of Haskell modules. Copy them all to somewhere on your HUGSPATH; you can then load and run DrIFT from any directory.



5.3 Runhugs

Edit the first line of the file ‘DrIFT’ to point to your copy of runhugs. Copy ‘DrIFT’ to somewhere on your PATH, and the remainder of the source (‘*.hs’,‘*.lhs’) to a directory in your HUGSPATH.



5.4 Environment Variables

In your environment, set DERIVEPATH to the list of directories you wish DrIFT to search for modules / interfaces.

DERIVEPATH is quite fussy about the format the list should take :-

  • each path should be separated by ':'
  • no space inserted anywhere
  • no final '/' on the end of a path

For instance

good - /users/nww/share/hugs/lib:/users/nww/share/hugs/lib/hugs

bad - /users/nww/share/hugs/lib/: /users/nww/share/hugs/lib/hugs/



5.5 Installing the Emacs DrIFT Mode

Edit ‘derive.el’ so that the variable hwl-derive-cmd contains your copy of the DrIFT executable. Place ‘derive.el’ into a directory on your load-path, byte-compile it and put the following command into your ‘.emacs’ file:

(load "derive")



6. Bugs and Shortcomings

  • DrIFT doesn't check for commands applying the same rule to a type.
  • No support for TeX-style literate code.


DrIFT-2.2.3/docs/drift.texi0000644000076400007640000007104310753606147012330 00000000000000\input texinfo @c -*-texinfo-*- @include version.texi @c 1. Header @setfilename drift.info @settitle DrIFT User Guide @c 2. Summary Description and Copyright @ifinfo @dircategory Haskell Tools @direntry * DrIFT: (drift). A type sensitive preprocessor for Haskell 98. @end direntry @end ifinfo @c 3. Title and Copyright @titlepage @title DrIFT User Guide @subtitle version @value{VERSION} @subtitle @value{UPDATED} @author Noel Winstanley @author @email{nww@@dcs.gla.ac.uk} @author John Meacham @author @email{john@@foo.net} @end titlepage @c 4. `Top' Node and Master Menu @ifinfo @node Top, Introduction, (dir), (dir) @top DrIFT DrIFT is a type-sensitive preprocessor for Haskell. It is used to automatically generate code for new defined types. @end ifinfo @menu * Introduction:: * User Guide:: * Standard Rules:: * User-Defined Rules:: * Installation:: * Bugs:: @end menu @c 5. Body @node Introduction, User Guide, Top, Top @chapter Introduction This is a guide to using DrIFT, a type sensitive preprocessor for Haskell 98. DrIFT is a tool which parses a Haskell module for structures (data & newtype declarations) and commands. These commands cause rules to be fired on the parsed data, generating new code which is then appended to the bottom of the input file, or redirected to another. These rules are expressed as Haskell code, and it is intended that the user can add new rules as required. DrIFT is written in pure Haskell 98, however code it generates is free to make use of extensions when appropriate. DrIFT is currently tested against hugs and ghc. @menu * What does DrIFT do?:: * Features:: * Motivation:: * An Example:: @end menu @node What does DrIFT do?, Features, Introduction, Introduction @section So, What Does DrIFT do? DrIFT allows derivation of instances for classes that aren't supported by the standard compilers. In addition, instances can be produced in separate modules to that containing the type declaration. This allows instances to be derived for a type after the original module has been compiled. As a bonus, simple utility functions can also be produced for types. @node Features, Motivation, What does DrIFT do?, Introduction @section Features @itemize @bullet @item DrIFT comes with a set of rules to produce instances for all derivable classes given in the Prelude. There's a rule to produce instances of NFData (the original motivation of all this), and rules for utility functions on types also. The DrIFT implementation is also regularly updated with rules submitted by users. @c @item @c To find the definition of a type, DrIFT can search through imported @c modules and the prelude. In addition to literate and non-literate @c scripts, derive is able to extract information from @c interface files generated by the GHC compiler (prior to 5.04). @item Code is generated using pretty-printing combinators. This means that the output is (fairly) well formatted, and easy on the human eye. @item Effort has been made to make the rule interface as easy to use as possible. This is to allow users to add rules to generate code specific to their own projects. As the rules are written in Haskell themselves, the user doesn't have to learn a new language syntax, and can use all Haskell's features. @end itemize Currently supported derivations are the following. This list is obtainable by running @code{DrIFT -l}. 
@verbatim Binary: Binary efficient binary encoding of terms GhcBinary byte sized binary encoding of terms Debugging: Observable HOOD observable General: NFData provides 'rnf' to reduce to normal form (deepSeq) Typeable derive Typeable for Dynamic Generics: FunctorM derive reasonable fmapM implementation HFoldable Strafunski hfoldr Monoid derive reasonable Data.Monoid implementation RMapM derive reasonable rmapM implementation Term Strafunski representation via Dynamic Prelude: Bounded Enum Eq Ord Read Show Representation: ATermConvertible encode terms in the ATerm format Haskell2Xml encode terms as XML (HaXml<=1.13) XmlContent encode terms as XML (HaXml>=1.14) Utility: Parse parse values back from standard 'Show' Query provide a QueryFoo class with 'is', 'has', 'from', and 'get' routines from provides fromFoo for each constructor get for label 'foo' provide foo_g to get it has hasfoo for record types is provides isFoo for each constructor test output raw data for testing un provides unFoo for unary constructors update for label 'foo' provides 'foo_u' to update it and foo_s to set it @end verbatim @node Motivation, An Example, Features, Introduction @section Why Do We Need DrIFT? The original motivation for DrIFT came from reading one of the Glasgow Parallel Haskell papers on Strategies. Strategies require producing instances of a class which reduces to normal form (called NFData). It was commented that it was a shame that instances of NFData couldn't be automatically derived; the rules to generate the instances are simple, and adding instances by hand is tiresome. Many classes' instances follow simple patterns. This is what makes coding up instances so tedious: there's no thought involved! The idea to extend DrIFT to work on imported types came from a discussion of the Haskell mailing list, arising from a point made by Olaf Chitil : @quotation Why is the automatic derivation of instances for some standard classes linked to data and newtype declarations? It happened already several times to me that I needed a standard instance of a data type that I imported from a module that did not provide that instance and which I did not want to change (a library; GHC, which I mainly want to extend by further modules, not spread changes over 250 modules). When declaring a new data type one normally avoids deriving (currently) unneeded instances, because it costs program code (and maybe one even wants to enable the user of the module to define his own instances). @end quotation The third feature of DrIFT, providing utility functions to manipulate new types, especially records was caused by finding oneself writing the same sort of code over and over again. These functions couldn't be captured in a class, but have a similar form for each type they are defined on. A thread on the Haskell mailing list made a related point: untagging and manipulating newtypes was more cumbersome than it should be. @node An Example, , Motivation, Introduction @section An Example Here's an example of what how DrIFT is used. This Haskell module contains commands to the DrIFT preprocessor. These are annotated with @code{@{-! ... !-@}}. After processing with DrIFT the generated code is glued on the bottom of the file, beneath a marker indicating where the new code starts. The machine generated code is quite long, and would really have been a drudge to type in by hand. 
@menu * Source Code:: * After processing with DrIFT:: @end menu @node Source Code, After processing with DrIFT, An Example, An Example @subsection Source Code @example -- example script for DrIFT module Example where import Foo @{-!for Foo derive : Read,NFData !-@} -- apply rules to imported type @{-! global : is !-@} -- global to this module @{-!for Data derive : update,Show,Read!-@} -- stand alone comand syntax @{-!for Maybe derive : NFData !-@} -- apply rules to prelude type data Data = D @{name :: Name, constraints :: [(Class,Var)], vars :: [Var], body :: [(Constructor,[(Name,Type)])], derive :: [Class], statement :: Statement@} data Statement = DataStmt | NewTypeStmt deriving Eq @{-!derive : Ord,Show,Read !-@} -- abbreviated syntax @end example @node After processing with DrIFT, , Source Code, An Example @subsection After processing with DrIFT @example module Example where import Foo @{-!for Foo derive : Read,NFData !-@} -- apply rules to imported type @{-! global : is !-@} -- global to this module @{-!for Data derive : update,Show,Read!-@} -- stand alone comand syntax @{-!for Maybe derive : NFData !-@} -- apply rules to prelude type data Data = D @{name :: Name, constraints :: [(Class,Var)], vars :: [Var], body :: [(Constructor,[(Name,Type)])], derive :: [Class], statement :: Statement@} data Statement = DataStmt | NewTypeStmt deriving Eq @{-!derive : Ord,Show,Read !-@} @{-* Generated by DrIFT-v1.0 : Look, but Don't Touch. *-@} isD (D aa ab ac ad ae af) = True isD _ = False instance Ord Statement where compare DataStmt (DataStmt) = EQ compare DataStmt (NewTypeStmt) = LT compare NewTypeStmt (DataStmt) = GT compare NewTypeStmt (NewTypeStmt) = EQ instance Show Statement where showsPrec d (DataStmt) = showString "DataStmt" showsPrec d (NewTypeStmt) = showString "NewTypeStmt" instance Read Statement where readsPrec d input = (\ inp -> [((DataStmt) , rest) | ("DataStmt" , rest) <- lex inp]) input ++ (\ inp -> [((NewTypeStmt) , rest) | ("NewTypeStmt" , rest) <- lex inp]) input isDataStmt (DataStmt) = True isDataStmt _ = False isNewTypeStmt (NewTypeStmt) = True isNewTypeStmt _ = False instance (NFData a) => NFData (Maybe a) where rnf (Just aa) = rnf aa rnf (Nothing) = () body_u f r@@D@{body@} = r@{body = f body@} constraints_u f r@@D@{constraints@} = r@{constraints = f constraints@} derive_u f r@@D@{derive@} = r@{derive = f derive@} name_u f r@@D@{name@} = r@{name = f name@} statement_u f r@@D@{statement@} = r@{statement = f statement@} vars_u f r@@D@{vars@} = r@{vars = f vars@} body_s v = body_u (const v) constraints_s v = constraints_u (const v) derive_s v = derive_u (const v) name_s v = name_u (const v) statement_s v = statement_u (const v) vars_s v = vars_u (const v) instance Show Data where showsPrec d (D aa ab ac ad ae af) = showParen (d >= 10) (showString "D" . showChar '@{' . showString "name" . showChar '=' . showsPrec 10 aa . showChar ',' . showString "constraints" . showChar '=' . showsPrec 10 ab . showChar ',' . showString "vars" . showChar '=' . showsPrec 10 ac . showChar ',' . showString "body" . showChar '=' . showsPrec 10 ad . showChar ',' . showString "derive" . showChar '=' . showsPrec 10 ae . showChar ',' . showString "statement" . showChar '=' . showsPrec 10 af . 
showChar '@}') instance Read Data where readsPrec d input = readParen (d > 9) (\ inp -> [((D aa ab ac ad ae af) , rest) | ("D" , inp) <- lex inp , ("@{" , inp) <- lex inp , ("name" , inp) <- lex inp , ("=" , inp) <- lex inp , (aa , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("constraints" , inp) <- lex inp , ("=" , inp) <- lex inp , (ab , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("vars" , inp) <- lex inp , ("=" , inp) <- lex inp , (ac , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("body" , inp) <- lex inp , ("=" , inp) <- lex inp , (ad , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("derive" , inp) <- lex inp , ("=" , inp) <- lex inp , (ae , inp) <- readsPrec 10 inp , ("," , inp) <- lex inp , ("statement" , inp) <- lex inp , ("=" , inp) <- lex inp , (af , inp) <- readsPrec 10 inp , ("@}" , rest) <- lex inp]) input -- Imported from other files :- instance Read Foo where readsPrec d input = (\ inp -> [((Foo) , rest) | ("Foo" , rest) <- lex inp]) input ++ (\ inp -> [((Bar) , rest) | ("Bar" , rest) <- lex inp]) input ++ (\ inp -> [((Bub) , rest) | ("Bub" , rest) <- lex inp]) input instance NFData Foo where rnf (Foo) = () rnf (Bar) = () rnf (Bub) = () @end example @node User Guide, Standard Rules, Introduction, Top @chapter User Guide This chapter assumes that DrIFT has already been installed and the environment variables set up. The installation is handled in @ref{Installation}. Briefly, the way DrIFT works is @enumerate @item parse the input file, looking for commands and data & newtype statements. @item generate code by executing the commands, which apply rules to types. @item if any commands remain unexecuted, this means the types aren't declared in this module, so DrIFT searches for them in imported modules. @item append the generated code to the bottom of the file (overwriting any previously generated code) @end enumerate Rules can be applied to any types defined using a @code{data} or @code{newtype} statement. Rules can't be applied to types defined using @code{type}, as this only produces a synonym for a type. @strong{Don't try to use rules on type synonyms.} @menu * Command Line:: * Command Syntax:: * Emacs DrIFT mode:: @end menu @node Command Line, Command Syntax, User Guide, User Guide @section The Command Line DrIFT processes standard Haskell scripts (suffix @file{.hs}) and literate scripts (suffix @file{.lhs}). Currently, only literate code using @code{>} is accepted: DrIFT doesn't understand the @TeX{} style of literate programming using @code{\begin@{code@}}. If you've compiled up an executable from the source code (or are using Runhugs) to run DrIFT over a file type :- @code{DrIFT @var{filename}} Alternatively, for Hugs, use :- @code{runhugs DrIFT @var{filename}} (run DrIFT over filename) @node Command Syntax, Emacs DrIFT mode, Command Line, User Guide @section Command Syntax Commands to DrIFT are entered into Haskell code in the form of @emph{annotations}. DrIFT's annotations start with @code{@{-!} and finish with @code{!-@}}. (This is so they don't clash with the compiler annotations given to GHC or HBC). There are three forms of command. @itemize @bullet @item @strong{Stand--Alone Command} (syntax : @code{@{-! for @var{type} derive : @var{rule1},@var{rule2},@dots{} !-@}}) This is the basic form of DrIFT command. It asks DrIFT to apply the listed rules to the specified type. If the type is parameterised, e.g. @code{Maybe a}, just enter the type name into the command, omitting any type variables. 
DrIFT assumes that types given are currently in scope, and will first search the current module. If it fails to find a matching type definition, the prelude and any imported modules are also searched. This is the only command which allows code to be generated for a type defined in another module. @item @strong{Abbreviated Command} (syntax : @code{@{-! derive :@var{rule1},@var{rule2},@dots{} !-@}}) This command is appended to the end of a @code{data} or @code{newtype} definition, after the deriving clause, if present. It applies the listed rules to the type it is attached to. @item @strong{Global Command} (syntax : @code{@{-! global :@var{rule1},@var{rule2},@dots{} !-@}} This command applies the listed rules to all types defined within the module. Note that this command doesn't cause code to be generated for types imported from other modules. @end itemize For an example of these commands in use, @xref{An Example}. @subsection Notes on Using Commands @itemize @bullet @item The stand-alone and global commands should be entered on a line by themselves, starting in the first column, (as with other top-level declarations, such as @code{infix}, @code{import},@code{newtype}). It doesn't matter what position they occur within the module. @item In a literate file, all commands should be entered on a `code' line (one starting with @code{>}). @item Commands may be commented out by using @code{--} and @code{@{- .. -@}} in the usual way. @item If two commands apply the same rule to a type, then two sets of identical code will be produced. This will cause a `multiple definition' error when the processed module is compiled/interpreted. @strong{Don't do it!} @end itemize @node Emacs DrIFT mode, , Command Syntax, User Guide @section Emacs DrIFT mode For Emacs fans, Hans W Loidl @email{hwloidl@@dcs.gla.ac.uk} has written a script which allows DrIFT to be run within a buffer. The commands available are @itemize @bullet @item @code{M-x hwl-derive}, @code{C-c d d} runs DrIFT over the current buffer, and then updates the buffer. @item @code{M-x hwl-derive-insert-standalone}, @code{C-c d s} inserts a template for a standalone command into the current buffer at the cursor position. @item @code{M-x hwl-derive-insert-local}, @code{C-c d l} inserts a template for an abbreviated command. @item @code{M-x hwl-derive-insert-global}, @code{C-c d g} inserts a template for a global command @end itemize In `hugs-mode' these functions are also available vie a menu item in the hugs menu. @node Standard Rules, User-Defined Rules, User Guide, Top @chapter Standard Rules Heres a listing of the rules that come pre-defined with DrIFT. If you want a more detailed idea of how they work, their definitions are in the file @file{StandardRules.hs}, and are (fairly) well documented. In the following list the @strong{highlighted} text is the name of the rule, as used in commands. The naming convention for rules is names starting with a capital generate an instance for the class of the same name. Sets of functions are generated by a name beginning with a lower case letter. @section Prelude Classes The classes @strong{Eq}, @strong{Ord}, @strong{Enum}, @strong{Show}, @strong{Read} & @strong{Bounded} are described in the Haskell report as being derivable; DrIFT provides rules for all these. @section Other Classes Originally, @strong{NFData} (for Normal Form evaluation strategies) was the only other class to have a rule. But now, there are rules for many more classes from 3rd-party libraries, e.g. 
@strong{XmlContent} from HaXml, @strong{Binary} from nhc98, @strong{Term} from Strafunski, @strong{FunctorM} for Generics, @strong{Observable} for HOOD debugging, @strong{Typeable} for dynamics, and so on. For a full list, use the @code{--list} command-line option. @section Utilities @itemize @bullet @item @strong{un} attempts to make newtypes a little nicer to use by providing an untagging function. This rule can only be used on types defined using @code{newtype}. @quotation For a type @code{newtype Foo a = F a}, @strong{un} produces the function @code{unFoo :: Foo a -> a}. @end quotation @item @strong{is} produces predicates that indicate the presence of a constructor. This is only useful for multi-constructor datatypes (obviously). @quotation For a type @code{data Foo = Bar | Bub}, @strong{is} generates @code{isBar :: Foo -> Bool} and @code{isBub :: Foo -> Bool}. @end quotation @item @strong{has} produces predicates that indicate the presence of a label. This can only be used with types where at least one of the constructors is a labelled record. Note that labels can be shared between constructors of the same type. @quotation For a type @code{data Foo a = F@{bar :: a,bub :: Int@}} @strong{has} generates @code{hasbar :: Foo a-> Bool} and @code{hasbub :: Foo a -> Bool}. @end quotation @item @strong{update} produces functions that update fields within a record type. This rule can only be used with a type where at least on of the constructors is a labelled record. @quotation For a type @code{data Foo a = F@{bar :: a, bub ::Int@}} @strong{update} generates @code{bar_u :: (a -> a) -> Foo a -> Foo a} and @code{bub_u :: (Int -> Int) -> Foo a -> Foo a} which apply a function to a field of a record, and then return the updated record. If the value does not have the given field then the value is returned unchanged. @code{bar_s :: a -> Foo a -> Foo a} and @code{bub_s ::Int -> Foo a -> Foo a} are also generated, and are used to set the value of a field in a record. @end quotation @item @strong{test} dumps the parsed representation of a datatype to the output. This is be useful for debugging new rules, as the user can see what information is stored about a particular type. @end itemize @node User-Defined Rules, Installation, Standard Rules, Top @chapter Rolling Your Own Programmers who only wish to use the pre-defined rules in DrIFT don't need to read or understand the following section. However, as well as using the supplied rules, users are encouraged to add their own. There is a stub module @file{UserRules.hs} in the source, to which rules can be added. If a compiled version of DrIFT is being used, the program will then have to be recompiled before the new rules can be used. However, if the Runhugs standalone interpreter is used, this is not necessary. Due to the way Runhugs searches for modules to load, a user may have many copies of the UserRules module. The UserRules module in the current directory will be loaded first. If that is not present, then the @code{HUGSPATH} environment variable is searched for the module. So it is possible to have a default UserRules module, and specialised ones for particular projects. @menu * The Basic Idea:: * How is a Type Represented?:: * Pretty Printing :: * Utilities:: * Adding a new rule:: @end menu @node The Basic Idea, How is a Type Represented?, User-Defined Rules, User-Defined Rules @section The Basic Idea A rule is a tuple containing a string and a function. The string is the name of the rule, and is used in commands in an input file. 
The function maps between the abstract representation of a datatype and text to be output (A sort of un-parser, if you like). The best way to understand this is to have a look at the existing rules in @file{StandardRules.hs}. This module is quite well documented. @node How is a Type Represented?, Pretty Printing , The Basic Idea, User-Defined Rules @section How is a Type Represented? A type is represented within DrIFT using the following data definition. @example >data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) >data Data = D @{ name :: Name, -- type name > constraints :: [(Class,Var)], > vars :: [Var], -- Parameters > body :: [Body], > derives :: [Class], -- derived classes > statement :: Statement@} > | Directive > | TypeName Name deriving (Eq,Show) >type Name = String >type Var = String >type Class = String @end example A @code{Data} type represents one parsed @code{data} or @code{newtype} statement. These are held in a @code{D} constructor record (the @code{Directive} and @code{TypeName} constructors are just used internally by DrIFT). We'll now examine each of the fields in turn. @itemize @bullet @item @code{name} holds the name of the new datatype as a string. @item @code{constraints} list the type constraints for the type variables of the new type. e.g. for @code{data (Eq a) => Foo a = F a}, the value of @code{constraints} would be @code{[("Eq","a")]}. @item @code{vars} contains a list of the type variables in the type. For the previous example, this would simply be @code{["a"]} . @item @code{body} is a list of the constructors of the type, and the information associated with them. We'll come back to this in a moment. @item @code{derives} lists the classes that the type an instance of though using the @code{deriving} clause. @item @code{statement} indicates whether the type was declared using a @code{newtype} or @code{data} statement @end itemize @subsection The Body @example >data Body = Body @{ constructor :: Constructor, > labels :: [Name], > types :: [Type]@} deriving (Eq,Show) >type Constructor = String @end example The body type holds information about one of the constructors of a type. @code{constructor} is self-explanatory. @code{labels} holds the names of labels of a record. This will be blank if the constructor isn't a record. @code{types} contains a representation of the type of each value within the constructor. The definition of @code{Type} is as follows. @example >data Type = Arrow Type Type -- fn > | Apply Type Type -- application > | Var String -- variable > | Con String -- constructor > | Tuple [Type] -- tuple > | List Type -- list > deriving (Eq,Show) @end example Few of the deriving rules supplied have actually needed to use this type information, which I found quite surprising. If you do find you need to use it, one example is the Haskell2Xml rule. @node Pretty Printing , Utilities, How is a Type Represented?, User-Defined Rules @section Pretty Printing Instead of producing a string as output, rules produce a value of type @code{Doc}. This type is defined in the Pretty Printing Library implemented by Simon Peyton-Jones. The pretty printer ensures that the code is formatted for readability, and also handles problems such as indentation. Constructing output using pretty printing combinators is easier and more structured than manipulating strings too. 
For those unfamiliar with these combinators, have a look at the module @file{Pretty.lhs} and the web page @url{http://www.cse.ogi.edu/~simonpj/} or for more detail the paper @cite{The Design of a Pretty Printing Library, J. Hughes} @node Utilities, Adding a new rule, Pretty Printing , User-Defined Rules @section Utilities Upon the pretty printing library, DrIFT defines some more formatting functions which make regularly occurring structures of code easier to write. These structures include simple instances, blocks of code, lists, etc. The utilities are in the module @file{RuleUtils.hs} and should be self explanatory. @node Adding a new rule, , Utilities, User-Defined Rules @section Adding a new rule A rule has type @code{type Rule = (String,Data -> Doc)}. Once you have written your mapping function and chosen an appropriate name for the rule, add this tuple to the list @code{userRules :: [Rule]} in module @file{UserRules.hs}. Recompile if necessary. DrIFT will then call this rule when its name occurs in a command in an input file. @node Installation, Bugs, User-Defined Rules, Top @chapter Installation DrIFT isn't a large or complicated application, so it shouldn't be too hard for anyone to get it up and running. For the platform you want to install for, read the corresponding section below, then see @ref{Environment Variables} @menu * GHC:: * Hugs:: * Runhugs:: * Environment Variables:: * Installing the Emacs DrIFT Mode:: @end menu @node GHC, Hugs, Installation, Installation @section GHC the automake script should automatically detect any ghc or nhc installation and use that to build and install DrIFT. First run @code{./configure} . To compile, type @code{make all}. The executable produced @file{DrIFT} can then be installed with @code{make install}. @node Hugs, Runhugs, GHC, Installation @section Hugs The DrIFT code comes as a set of Haskell modules. You want to copy all these to somewhere in your @code{HUGSPATH}, then you can load and run DrIFT in any directory. @node Runhugs, Environment Variables, Hugs, Installation @section Runhugs Edit the first line of the the file @file{DrIFT} to point to your copy of @code{runhugs}. Copy @file{DrIFT} to somewhere on your @code{PATH}, and the remainder of the source (@file{*.hs},@file{*.lhs}) to a directory in your @code{HUGSPATH} @node Environment Variables, Installing the Emacs DrIFT Mode , Runhugs, Installation @section Environment Variables In you environment set @code{DERIVEPATH} to the list of directories you wish derive to search for modules / interfaces. @code{DERIVEPATH} is quite fussy about the format the list should take :- @itemize @bullet @item each path should be separated by ':' @item no space inserted anywhere @item no final '/' on the end of a path @end itemize For instance good - @code{/users/nww/share/hugs/lib:/users/nww/share/hugs/lib/hugs} bad - @code{/users/nww/share/hugs/lib/: /users/nww/share/hugs/lib/hugs/} @node Installing the Emacs DrIFT Mode, , Environment Variables, Installation @section Installing the Emacs DrIFT Mode Edit @file{derive.el} so that the variable @code{hwl-derive-cmd} contains your copy of the DrIFT executable. Place @file{derive.el} into a directory on your @code{load-path}, byte-compile it and put the following command into your @file{.emacs} file: @code{(load "derive")} @node Bugs, , Installation, Top @chapter Bugs and Shortcomings @itemize @bullet @item DrIFT doesn't check for commands applying the same rule to a type. @item No support for @TeX{}-style literate code. 
@end itemize @contents @bye DrIFT-2.2.3/AUTHORS0000644000076400007640000000057210753606147010444 00000000000000DrIFT was originally created by * Noel Winstanley An updated implementation for Haskell 98 was produced by: * Malcolm Wallace The installation mechanism was improved by: * Joost Visser (Joost.Visser@cwi.nl) The instance derivation rules for Typeable, Term (based on Dynamic), and other classes were contributed by * John Meacham For details, see the darcs changelog DrIFT-2.2.3/aclocal.m40000644000076400007640000004742110753606415011236 00000000000000# generated automatically by aclocal 1.10 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_if(m4_PACKAGE_VERSION, [2.61],, [m4_fatal([this file was generated for autoconf 2.61. You have another version of autoconf. If you want to use that, you should regenerate the build system entirely.], [63])]) # Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.10' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.10], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AC_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.10])dnl _AM_AUTOCONF_VERSION(m4_PACKAGE_VERSION)]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to # `$srcdir', `$srcdir/..', or `$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. 
This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is `.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 12 # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.60])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." 
AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) AM_MISSING_PROG(AUTOCONF, autoconf) AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) AM_MISSING_PROG(AUTOHEADER, autoheader) AM_MISSING_PROG(MAKEINFO, makeinfo) AM_PROG_INSTALL_SH AM_PROG_INSTALL_STRIP AC_REQUIRE([AM_PROG_MKDIR_P])dnl # We need awk for the "check" target. The system "awk" is bad on # some platforms. AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES(CC)], [define([AC_PROG_CC], defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES(CXX)], [define([AC_PROG_CXX], defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES(OBJC)], [define([AC_PROG_OBJC], defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl ]) ]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $1 | $1:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $1" >`AS_DIRNAME([$1])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl install_sh=${install_sh-"\$(SHELL) $am_aux_dir/install-sh"} AC_SUBST(install_sh)]) # Copyright (C) 2003, 2005 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it supports --run. # If it does, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= AC_MSG_WARN([`missing' script is too old or missing]) fi ]) # Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_MKDIR_P # --------------- # Check for `mkdir -p'. AC_DEFUN([AM_PROG_MKDIR_P], [AC_PREREQ([2.60])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, dnl while keeping a definition of mkdir_p for backward compatibility. dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of dnl Makefile.ins that do not define MKDIR_P, so we do our own dnl adjustment using top_builddir (which is defined more often than dnl MKDIR_P). AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl case $mkdir_p in [[\\/$]]* | ?:[[\\/]]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 3 # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # ------------------------------ # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), 1)]) # _AM_SET_OPTIONS(OPTIONS) # ---------------------------------- # OPTIONS is a space-separated list of Automake options. 
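# For illustration (not part of the original macro): a configure.ac that
# calls AM_INIT_AUTOMAKE([no-define tar-ustar]) hands that list to
# _AM_SET_OPTIONS; each word is mangled by _AM_MANGLE_OPTION (every
# non-alphanumeric character becomes `_'), so the m4 macros
# _AM_OPTION_no_define and _AM_OPTION_tar_ustar are defined to 1.
# Later, _AM_IF_OPTION([no-define], ...) and _AM_IF_OPTION([tar-ustar], ...)
# inside AM_INIT_AUTOMAKE simply test for those definitions.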
AC_DEFUN([_AM_SET_OPTIONS], [AC_FOREACH([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Just in case sleep 1 echo timestamp > conftest.file # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t $srcdir/configure conftest.file` fi rm -f conftest.file if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT(yes)]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor `install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in `make install-strip', and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be `maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
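# (Illustrative aside on AM_PROG_INSTALL_STRIP above, not part of this file:
#  because `make install-strip' routes installation through install-sh with
#  INSTALL_STRIP_PROGRAM="$(install_sh) -c -s", a cross-compiling user can
#  presumably override the strip tool on the command line, e.g.
#      make install-strip STRIP=arm-linux-gnueabi-strip
#  and install-sh would then pick it up via the STRIPPROG mechanism
#  mentioned above.)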
# _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputing VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of `v7', `ustar', or `pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. AM_MISSING_PROG([AMTAR], [tar]) m4_if([$1], [v7], [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], [m4_case([$1], [ustar],, [pax],, [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' _am_tools=${am_cv_prog_tar_$1-$_am_tools} # Do not fold the above two line into one, because Tru64 sh and # Solaris sh will not grok spaces in the rhs of `-'. for _am_tool in $_am_tools do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([ac-macros/acincludepackage.m4]) DrIFT-2.2.3/src/0000777000076400007640000000000010753607202010234 500000000000000DrIFT-2.2.3/src/ChaseImports.hs0000644000076400007640000001072110753606147013115 00000000000000{- this module coordinates the whole shebang. 
First splits input into `of interest' and `computer generated' Then parses 'of interest', and plucks out data and newtype declarations and processor commands The commands are combined with the parsed data, and if any data is missing, derive goes hunting for it, looking in likely script and interface files in the path variable DERIVEPATH. Derive searches recusively though modules imported until all the types needed are found, or it runs out of links, which causes an error -} --GHC version module ChaseImports( fromLit, ToDo(), isData, resolve, codeSeperator, chaseImports, parser, userCode ) where import RuleUtils (Tag) import DataP import CommandP import ParseLib2 import System import List import qualified Unlit import Monad import GenUtil try x = catch (x >>= return . Right) (return . Left) --- Split up input --------------------------------------------------------- splitString :: String -> String -> (String,String) splitString s = (\(x,y) -> (unlines x,unlines y)) . break (\x -> x == s || x == '>':s) . lines userCode = splitString codeSeperator codeSeperator = "{-* Generated by DrIFT : Look, but Don't Touch. *-}" -- Parser - extract data and newtypes from code type ToDo = [([Tag],Data)] parser :: String -> ToDo parser = sanitycheck . papply p (0,0) . \s -> ((0,0),s) where p = parse . skipUntilOff $ statement +++ command statement = do d <- datadecl +++ newtypedecl ts <- opt local return (ts,d) sanitycheck [] = error "***Error: no DrIFT directives found\n" sanitycheck [(x,_)] = x sanitycheck ((x,_):_) = error "***Error: ambiguous DriFT directives?" -------Go Hunting for files, recursively ---------------------------------- chaseImports :: String -> ToDo -> IO ToDo chaseImports txt dats = do (left,found) <- chaseImports' txt dats if (not . null) left then error ("can't find type " ++ show left) else return found chaseImports' :: String -> ToDo -> IO (ToDo,ToDo) chaseImports' text dats = case papply (parse header) (0,-1) ((0,0),text) of [] -> return (dats,[]) (modnames:_) -> foldM action (dats,[]) (fst modnames) where action :: (ToDo,ToDo) -> FilePath -> IO (ToDo,ToDo) action (dats,done) m | null dats = return ([],done) | otherwise = do mp <- ioM $ getEnv "DERIVEPATH" let paths = maybe [] breakPaths mp c <- findModule paths m let (found,rest) = scanModule dats c if (null rest) then return ([],done ++ found) -- finished else do (dats',done') <- chaseImports' c rest return (dats',done' ++ done ++ found) -- break DERIVEPATH into it's components breakPaths :: String -> [String] breakPaths x = case break (==':') x of (p,(_:pp)) -> p: breakPaths pp (p,[]) -> [p] -- search though paths, using try findModule :: [String] -> String -> IO String findModule paths modname = let action p = try $ do h <- readFile p return (h,p,".lhs" `isSuffixOf` p) fnames = combine paths modname isLeft (Left _ ) = True isLeft _ = False in do hh <- mapM action fnames let (h,p,l) = case dropWhile (isLeft) hh of ((Right h):_) -> h _ -> error ("can't find module " ++ modname) putStrLn $ "-- " ++ p return $ fromLit l h -- generate filepaths by combining module names with different suffixes. combine :: [String] -> String -> [FilePath] combine paths modname = [p++'/':f| f <- toFile modname, p <- ("." :paths)] where toFile :: String -> [String] toFile l = [l++".hs",l++".lhs"] -- pluck out the bits of interest scanModule :: ToDo -> String -> (ToDo,ToDo) scanModule dats txt = let newDats = filter isData . parse $ txt parse l = map snd . parser . fst . 
userCode $ l in (resolve newDats dats ([],[])) -- update what's still missing resolve :: [Data] -> ToDo -> (ToDo,ToDo) -> (ToDo,ToDo) resolve _ [] acc = acc resolve parsed ((tags,TypeName t):tt) (local,imports) = case filter ((== t) . name) parsed of [x] -> resolve parsed tt ((tags,x):local,imports) _ -> resolve parsed tt (local,(tags,TypeName t):imports) --handle literate scripts --------------------------------------------------- -- NB we don't do the latex-style literate scripts currently. fromLit True txt = Unlit.unlit "" txt fromLit False txt = txt --isLiterate :: String -> Bool --isLiterate = any ((==">"). take 1) . lines -- utils -- this should be the sort of thing automatically generated !! isData D{} = True isData _ = False DrIFT-2.2.3/src/Version.hs.in0000644000076400007640000000020610753606147012543 00000000000000module Version(package, version, fullName) where package = "@PACKAGE@" version = "@VERSION@" fullName = package ++ "-" ++ version DrIFT-2.2.3/src/RuleUtils.hs0000644000076400007640000000743310753606147012452 00000000000000-- utilities for writing new rules. module RuleUtils (module Pretty,module RuleUtils, module DataP)where import Pretty import DataP (Statement(..),Data(..),Type(..),Name,Var,Class, Body(..),Constructor) -- Rule Declarations type Tag = String type Rule = (Tag,Data -> Doc) -- Rule (name, rule, category, helpline, helptext) type RuleDef = (Tag, Data -> Doc, String, String, Maybe String) x = text "x" f = text "f" rArrow = text "->" lArrow = text "<-" --equals = text "=" blank = text "_" semicolon = char ';' prettyType :: Type -> Doc --prettyType (Apply t1 t2) = parens (prettyType t1 <+> prettyType t2) prettyType (Arrow x y) = parens (prettyType x <+> text "->" <+> prettyType y) prettyType (List x) = brackets (prettyType x) prettyType (Tuple xs) = tuple (map prettyType xs) prettyType (Var s) = text s prettyType (Con s) = text s prettyType (LApply t ts) = prettyType t <+> hsep (map prettyType ts) -- New Pretty Printers --------------- texts :: [String] -> [Doc] texts = map text block, blockList,parenList,bracketList :: [Doc] -> Doc block = nest 4 . vcat blockList = braces . fcat . sepWith semi parenList = parens . fcat . sepWith comma bracketList = brackets . fcat . sepWith comma -- for bulding m1 >> m2 >> m3, f . g . h, etc sepWith :: a -> [a] -> [a] sepWith _ [] = [] sepWith a [x] = [x] sepWith a (x:xs) = x:a: sepWith a xs --optional combinator, applys fn if arg is non-[] opt :: [a] -> ([a] -> Doc) -> Doc opt [] f = empty opt a f = f a --equivalent of `opt' for singleton lists opt1 :: [a] -> ([a] -> Doc) -> (a -> Doc) -> Doc opt1 [] _ _ = empty opt1 [x] _ g = g x opt1 a f g = f a -- new simple docs commentLine x = text "--" <+> x -- useful for warnings / error messages commentBlock x = text "{-" <> x <> text "-}" --- Utility Functions ------------------------------------------------------- -- Instances -- instance header, handling class constraints etc. simpleInstance :: Class -> Data -> Doc simpleInstance s d = hsep [text "instance" , opt constr (\x -> parenList x <+> text "=>") , text s , opt1 (texts (name d : vars d)) parenSpace id] where constr = map (\(c,v) -> text c <+> text v) (constraints d) ++ map (\x -> text s <+> text x) (vars d) parenSpace = parens . hcat . sepWith space -- instanceSkeleton handles most instance declarations, where instance -- functions are not related to one another. A member function is generated -- using a (IFunction,Doc) pair. 
The IFunction is applied to each body of the -- type, creating a block of pattern - matching cases. Default cases can be -- given using the Doc in the pair. If a default case is not required, set -- Doc to 'empty' type IFunction = Body -> Doc -- instance function instanceSkeleton :: Class -> [(IFunction,Doc)] -> Data -> Doc instanceSkeleton s ii d = (simpleInstance s d <+> text "where") $$ block functions where functions = concatMap f ii f (i,dflt) = map i (body d) ++ [dflt] -- little variable name generator, generates (length l) unique names aa - aZ varNames :: [a] -> [Doc] varNames l = take (length l) names where names = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 'Z']] -- variant generating aa' - aZ' varNames' :: [a] -> [Doc] varNames' = map (<> (char '\'')) . varNames -- pattern matching a constructor and args pattern :: Constructor -> [a] -> Doc pattern c l = parens $ fsep (text c : varNames l) pattern_ :: Constructor -> [a] -> Doc pattern_ c l = parens $ fsep (text c : replicate (length l) (text "_")) pattern' :: Constructor -> [a] -> Doc pattern' c l = parens $ fsep (text c : varNames' l) -- test that a datatype has at least one record constructor hasRecord :: Data -> Bool hasRecord d = statement d == DataStmt && any (not . null . labels) (body d) tuple :: [Doc] -> Doc tuple xs = parens $ hcat (punctuate (char ',') xs) DrIFT-2.2.3/src/collect_rules.sh0000644000076400007640000000073510753606147013357 00000000000000#!/bin/sh set -e echo "-- Generated by collect_rules.sh" echo echo "module Rules(rules) where" echo for a in Rules/*; do b=$(echo $a | sed -ne 's/^Rules\/\(.*\)\.l\?hs$/\1/p') echo "import qualified Rules.$b"; done; echo echo "rules = concat" for a in Rules/*; do b=$(echo $a | sed -ne 's/^Rules\/\(.*\)\.l\?hs$/\1/p') if test -n "$seen"; then echo " ,Rules.$b.rules"; else echo " [Rules.$b.rules"; seen=1 fi done; echo " ]"; DrIFT-2.2.3/src/Rules/0000777000076400007640000000000010753607202011326 500000000000000DrIFT-2.2.3/src/Rules/Generic.hs0000644000076400007640000001333510753606147013166 00000000000000 module Rules.Generic(rules) where -- import StandardRules import RuleUtils import List(intersperse) rules :: [RuleDef] rules = [ ("ATermConvertible", atermfn, "Representation", "encode terms in the ATerm format", Nothing), ("Typeable", typeablefn, "General", "derive Typeable for Dynamic", Nothing), ("Term", dyntermfn, "Generics","Strafunski representation via Dynamic", Nothing), ("HFoldable", hfoldfn, "Generics", "Strafunski hfoldr", Nothing), ("Observable", observablefn, "Debugging", "HOOD observable", Nothing) ] -- useful helper things addPrime doc = doc <> (text "'") ppCons cv c = mkpattern (constructor c) (types c) cv namesupply = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 'Z']] mknss [] _ = [] mknss (c:cs) ns = let (thisns,rest) = splitAt (length (types c)) ns in thisns: mknss cs rest mkpattern :: Constructor -> [a] -> [Doc] -> Doc mkpattern c l ns = if null l then text c else parens (hsep (text c : take (length l) ns)) instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . 
sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] doublequote str = "\""++str++"\"" mkList :: [Doc] -> Doc mkList xs = text "[" <> hcat (punctuate comma xs) <> text "]" typeablefn :: Data -> Doc typeablefn dat = tcname <+> equals <+> text "mkTyCon" <+> text (doublequote $ name dat) $$ instanceheader "Typeable" dat $$ block ( [ text "typeOf x = mkTyConApp" <+> tcname <+> text "[" <+> hcat (sepWith comma (map getV' (vars dat))) <+> text "]" $$ wheres ]) where tcname = text ("_tc_" ++ (name dat) ++ "Tc") wheres = where_decls (map getV (vars dat)) tpe = text (name dat) <+> hcat (sepWith space (map text (vars dat))) getV' var = text "typeOf" <+> parens (text "get" <> text var <+> text "x") getV var = text "get" <> text var <+> text "::" <+> tpe <+> text "->" <+> text var $$ text "get" <> text var <+> equals <+> text "undefined" where_decls [] = empty where_decls ds = text " where" $$ block ds dyntermfn :: Data -> Doc dyntermfn dat = instanceheader "Term" dat $$ block [ text "explode (x::"<>a<>text ") = TermRep (toDyn x, f x, g x) where", block ( zipWith f cvs cs ++ zipWith g cvs cs )] where f cv c = text "f" <+> ppCons cv c <+> equals <+> mkList (map (text "explode" <+>) $ vrs c cv) g cv c = text "g" <+> ppCons underscores c <+> text "xs" <+> -- text "|" <+> mkList (vrs c cv) <+> text "<- TermRep.fArgs xs" <+> equals <+> text "toDyn" <+> parens (parens (text (constructor c) <+> hsep (map h (vrs c cv))) <> text "::a" ) equals <+> text "case TermRep.fArgs xs of" <+> mkList (vrs c cv) <+> text "->" <+> text "toDyn" <+> parens (parens (text (constructor c) <+> hsep (map h (vrs c cv))) <> text "::"<>a<>text "" ) <> text " ; _ -> error \"Term explosion error.\"" h n = parens $ text "TermRep.fDyn" <+> n cvs = mknss cs namesupply cs = body dat vrs c cv = take (length (types c)) cv underscores = repeat $ text "_" a = text (name dat) <+> hcat (sepWith space (map text (vars dat))) -- begin observable observablefn :: Data -> Doc observablefn dat = let cs = body dat cvs = mknss cs namesupply in instanceheader "Observable" dat $$ block (zipWith observefn cvs cs) observefn cv c = text "observer" <+> ppCons cv c <+> text "= send" <+> text (doublequote (constructor c)) <+> parens (text "return" <+> text (constructor c) <+> hsep (map f (take (length (types c)) cv))) where f n = text "<<" <+> n -- begin of ATermConvertible derivation -- Author: Joost.Visser@cwi.nl atermfn dat = instanceSkeleton "ATermConvertible" [ (makeToATerm (name dat),defaultToATerm) , (makeFromATerm (name dat),defaultFromATerm (name dat)) ] dat makeToATerm name body = let cvs = head (mknss [body] namesupply) in text "toATerm" <+> ppCons cvs body <+> text "=" <+> text "(AAppl" <+> text (doublequote (constructor body)) <+> text "[" <+> hcat (intersperse (text ",") (map childToATerm cvs)) <+> text "])" defaultToATerm = empty childToATerm v = text "toATerm" <+> v makeFromATerm name body = let cvs = head (mknss [body] namesupply) in text "fromATerm" <+> text "(AAppl" <+> text (doublequote (constructor body)) <+> text "[" <+> hcat (intersperse (text ",") cvs) <+> text "])" <+> text "=" <+> text "let" <+> vcat (map childFromATerm cvs) <+> text "in" <+> ppCons (map addPrime cvs) body defaultFromATerm name = hsep $ texts ["fromATerm", "u", "=", "fromATermError", (doublequote name), "u"] childFromATerm v = (addPrime v) <+> text "=" <+> text "fromATerm" <+> v -- end of ATermConvertible derivation -- begin of HFoldable derivation -- Author: Joost 
Visser and Ralf Laemmel hfoldfn dat = instanceSkeleton "HFoldable" [ (make_hfoldr (name dat), default_hfoldr), (make_conof (name dat), default_conof) ] dat make_hfoldr name body = let cvs = head (mknss [body] namesupply) in text "hfoldr'" <+> text "alg" <+> ppCons cvs body <+> text "=" <+> foldl (\rest var -> text "hcons alg" <+> var <+> parens rest) (text "hnil alg" <+> text (constructor body)) cvs default_hfoldr = empty make_conof name body = let cvs = head (mknss [body] namesupply) in text "conOf" <+> ppCons cvs body <+> text "=" <+> text (doublequote (constructor body)) default_conof = empty DrIFT-2.2.3/src/Rules/Utility.hs0000644000076400007640000000304010753606147013245 00000000000000module Rules.Utility(rules) where import RuleUtils import List import GenUtil rules :: [RuleDef] rules = [("Query",queryGen, "Utility", "provide a QueryFoo class with 'is', 'has', 'from', and 'get' routines", Nothing) ] queryGen :: Data -> Doc queryGen d@D{name = name} = cls $$ text "" $$ ins where cls = text "class" <+> text className <+> typeName <+> cargs <+> text "where" $$ block fs ot a b = a <+> text "::" <+> b cargs = if null $ vars d then empty else dargs <+> text "|" <+> typeName <+> text "->" <+> dargs dargs = hsep (map text $ vars d) className = "Query" ++ name typeName = text "_x" fs = (map is (body d) ) is Body{constructor = constructor, types = types} = fn $$ dfn $$ ffn where fnName = text $ "is" ++ constructor fromName = "from" ++ constructor fn = ot fnName $ typeName <+> rArrow <+> text "Bool" dfn = fnName <+> x <+> text "=" <+> text "isJust" <+> parens (text fromName <+> x) ffn = ot (text fromName) $ text "Monad _m =>" <+> typeName <+> rArrow <+> text "_m" <+> tuple (map prettyType types) ins = text "instance" <+> text className <+> parens (text name <+> dargs) <+> dargs <+> text "where" $$ block fromInsts fromInsts = map fi (body d) fi Body{constructor = constructor, types = types} = fn $$ dfn where fromName = "from" ++ constructor fn = text fromName <+> pattern constructor types <+> text "=" <+> text "return" <+> tuple (varNames types) dfn = text fromName <+> blank <+> equals <+> text "fail" <+> tshow fromName DrIFT-2.2.3/src/Rules/BitsBinary.hs0000644000076400007640000000676510753606147013671 00000000000000-- stub module to add your own rules. module Rules.BitsBinary(rules) where import List (nub,intersperse) import RuleUtils -- useful to have a look at this too rules = [ ("BitsBinary", userRuleBinary, "Binary", "bit based binary encoding of terms", Nothing) ] {- datatype that rules manipulate :- data Data = D { name :: Name, -- type's name constraints :: [(Class,Var)], vars :: [Var], -- Parameters body :: [Body], derives :: [Class], -- derived classes statement :: Statement} -- type of statement | Directive --| | TypeName Name --| used by derive (ignore) deriving (Eq,Show) data Body = Body { constructor :: Constructor, labels :: [Name], -- [] for a non-record datatype. types :: [Type]} deriving (Eq,Show) data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) type Name = String type Var = String type Class = String type Constructor = String type Rule = (Tag, Data->Doc) -} -- useful helper things namesupply = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 
'Z']] mknss [] _ = [] mknss (c:cs) ns = let (thisns,rest) = splitAt (length (types c)) ns in thisns: mknss cs rest mkpattern :: Constructor -> [a] -> [Doc] -> Doc mkpattern c l ns = if null l then text c else parens (hsep (text c : take (length l) ns)) instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] -- begin here for Binary derivation userRuleBinary dat = let cs = body dat cvs = mknss cs namesupply k = (ceiling . logBase 2 . realToFrac . length) cs in instanceheader "Binary" dat $$ block ( zipWith3 (putfn k) [0..] cvs cs ++ getfn k [0..] cvs cs : getFfn k [0..] cvs cs : zipWith (sizefn k) cvs cs ) putfn k n cv c = text "put bh" <+> ppCons cv c <+> text "= do" $$ nest 8 ( text "pos <- putBits bh" <+> text (show k) <+> text (show n) $$ vcat (map (text "put bh" <+>) cv) $$ text "return pos" ) ppCons cv c = mkpattern (constructor c) (types c) cv getfn k ns cvs cs = text "get bh = do" $$ nest 8 ( text "h <- getBits bh" <+> text (show k) $$ text "case h of" $$ nest 2 ( vcat $ zipWith3 (\n vs c-> text (show n) <+> text "-> do" $$ nest 6 ( vcat (map (\v-> v <+> text "<-" <+> text "get bh") vs) $$ text "return" <+> ppCons vs c )) ns cvs cs ++ [ text "_ -> fail \"invalid binary data found\"" ] ) ) getFfn k ns cvs cs = text "getF bh p =" <+> nest 8 ( text "let (h,p1) = getBitsF bh 1 p in" $$ text "case h of" $$ nest 2 ( vcat $ zipWith3 (\n vs c-> text (show n) <+> text "->" <+> parens (cons c <> text ",p1") <+> hsep (map (\_-> text "<< getF bh") vs)) ns cvs cs ++ [ text "_ -> fail \"invalid binary data found\"" ] ) ) where cons = text . constructor sizefn k [] c = text "sizeOf" <+> ppCons [] c <+> text "=" <+> text (show k) sizefn k cv c = text "sizeOf" <+> ppCons cv c <+> text "=" <+> text (show k) <+> text "+" <+> hsep (intersperse (text "+") (map (text "sizeOf" <+>) cv)) -- end of binary derivation DrIFT-2.2.3/src/Rules/FunctorM.hs0000644000076400007640000001735410753606147013354 00000000000000-- stub module to add your own rules. 
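{- A minimal sketch (not shipped with DrIFT) of what such a stub module
   usually grows into: a rule module only needs to export
   `rules :: [RuleDef]', where RuleDef comes from RuleUtils, and each rule
   maps a parsed Data declaration to a Doc.  The module name, rule name and
   generated binding below are made up for illustration.

   module Rules.MyRule (rules) where

   import RuleUtils

   rules :: [RuleDef]
   rules = [ ("MyRule", userRuleMy, "Utility",
              "example rule: count the constructors of a type", Nothing) ]

   -- emits a single binding, e.g.  constructorCountFoo = 3
   userRuleMy :: Data -> Doc
   userRuleMy d = text ("constructorCount" ++ name d) <+> equals
                  <+> text (show (length (body d)))

   After dropping such a file into src/Rules/, collect_rules.sh (see
   src/collect_rules.sh above) regenerates the Rules module so the new rule
   is included in the next build.
-}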
module Rules.FunctorM (rules) where import List import RuleUtils rules = [ ("FunctorM", userRuleFunctorM, "Generics", "derive reasonable fmapM implementation", Nothing), ("Functor", userRuleFunctor, "Generics", "derive reasonable Functor instance", Nothing), ("Foldable", userRuleFoldable, "Generics", "derive instance for Data.Foldable", Nothing), ("Traversable", userRuleTraversable, "Generics", "derive instance for Data.Traversable", Nothing), ("RMapM", userRuleRMapM, "Generics", "derive reasonable rmapM implementation", Nothing) ] hasVar tt t = hasType (Var tt) t hasType tt t = has t where has t | t == tt = True has (List t) = has t has (Arrow a b) = has a || has b has (LApply t ts) = any has (t:ts) has (Tuple ts) = any has (ts) has _ = False userRuleFoldable D{name = name, vars = [] } = text "--" <+> text name <> text ": Cannot derive Foldable without type variables" userRuleFoldable D{name = name, vars = vars, body=body } = ins where (tt:rt') = reverse vars rt = reverse rt' fn = if null rt then text name else parens (text name <+> hsep (map text rt)) ins = text "instance" <+> text "Foldable" <+> fn <+> text "where" $$ block fs fs = map f' $ body combine xs = if null xs then text "Data.Monoid.mempty" else hsep (intersperse (text "`Data.Monoid.mappend`") xs) f' Body{constructor=constructor, types=types} = text "foldMap" <+> f <+> pattern constructor types <+> equals <+> combine (concatMap g (zip types vnt)) where vnt = varNames types g (t,n) | not (hasVar tt t) = [] g (Var t,n) | t == tt = [parens $ f <+> n] g (List (Var t),n) | t == tt = [parens $ text "Data.Monoid.mconcat $ Prelude.map" <+> f <+> n] g (List t,n) = [parens $ text "Data.Monoid.mconcat $ Prelude.map" <+> lf t <+> n] where lf t = parens $ text "\\x ->" <+> combine (g (t,x)) g (LApply t [],n) = g (t,n) g (LApply t ts,n) = [parens $ text "Data.Foldable.foldMap" <+> f <+> n] g (Tuple ts,n) = [parens $ text "case" <+> n <+> text "of" <+> tuple (varNames ts) <+> rArrow <+> combine (concatMap g (zip ts (varNames ts)))] g _ = [] userRuleFunctor D{name = name, vars = [] } = text "--" <+> text name <> text ": Cannot derive Functor without type variables" userRuleFunctor D{name = name, vars = vars, body=body } = ins where (tt:rt') = reverse vars rt = reverse rt' fn = if null rt then text name else parens (text name <+> hsep (map text rt)) ins = text "instance" <+> text "Functor" <+> fn <+> text "where" $$ block fs fs = map f' $ body f' Body{constructor=constructor, types=types} = text "fmap" <+> text "f" <+> pattern constructor types <+> equals <+> text constructor <+> hsep (map g (zip types vnt)) where vnt = varNames types g (t,n) | not (hasVar tt t) = n g (Var t,n) | t == tt = parens $ f <+> n g (List (Var t),n) | t == tt = parens $ text "Prelude.map" <+> f <+> n g (List t,n) = parens $ text "Prelude.map" <+> lf t <+> n where lf t = parens $ text "\\x ->" <+> g (t,x) g (LApply t [],n) = g (t,n) g (LApply t ts,n) | last ts == Var tt = parens $ text "fmap" <+> f <+> n g (Tuple ts,n) = parens $ text "case" <+> n <+> text "of" <+> tuple (varNames ts) <+> rArrow <+> tuple (map g (zip ts (varNames ts))) g _ = empty userRuleFunctorM D{name = name, vars = [] } = text "--" <+> text name <> text ": Cannot derive FunctorM without type variables" userRuleFunctorM D{name = name, vars = vars, body=body } = ins where (tt:rt') = reverse vars rt = reverse rt' fn = if null rt then text name else parens (text name <+> hsep (map text rt)) ins = text "instance" <+> text "FunctorM" <+> fn <+> text "where" $$ block fs fs = map f' $ body f' 
Body{constructor=constructor, types=types} = text "fmapM" <+> text "f" <+> pattern constructor types <+> equals <+> text "do" <+> hcat (map g (zip types vnt)) <+> text "return $" <+> text constructor <+> hsep vnt where vnt = varNames types g (t,n) | not (hasVar tt t) = empty g (Var t,n) | t == tt = n <+> lArrow <+> text "f" <+> n <> semicolon g (List (Var t),n) | t == tt = n <+> lArrow <+> text "mapM" <+> f <+> n <> semicolon g (List t,n) = n <+> lArrow <+> text "mapM" <+> lf t <+> n <> semicolon where lf t = parens $ text "\\x ->" <+> text "do" <+> g (t,x) <+> text "return" <+> x g (LApply t [],n) = g (t,n) g (LApply t ts,n) | last ts == Var tt = n <+> lArrow <+> text "fmapM" <+> f <+> n <> semicolon g (Tuple ts,n) = n <+> lArrow <+> (parens $ text "do" <+> tuple (varNames ts) <+> lArrow <+> text "return" <+> n <> semicolon <+> hcat (map g (zip ts (varNames ts))) <> text "return" <+> tuple (varNames ts)) <> semicolon g _ = empty userRuleRMapM D{name = name, vars = vars, body=body } = ins where --(tt:rt') = reverse vars tt = if null vars then Con name else LApply (Con name) (map Var vars) rt = vars fn = if null rt then text name else parens (text name <+> hsep (map text rt)) ins = text "instance" <+> text "RMapM" <+> fn <+> text "where" $$ block fs fs = map f' $ body f' Body{constructor=constructor, types=types} = text "rmapM" <+> text "f" <+> pattern constructor types <+> equals <+> text "do" <+> hcat (map g (zip types vnt)) <+> text "return $" <+> text constructor <+> hsep vnt where vnt = varNames types g (t,n) | not (hasType tt t) = empty g ( t,n) | t == tt = n <+> lArrow <+> text "f" <+> n <> semicolon g (List (t),n) | t == tt = n <+> lArrow <+> text "mapM" <+> f <+> n <> semicolon g (List t,n) = n <+> lArrow <+> text "mapM" <+> lf t <+> n <> semicolon where lf t = parens $ text "\\x ->" <+> text "do" <+> g (t,x) <+> text "return" <+> x g (LApply t [],n) = g (t,n) g (LApply t ts,n) | last ts == tt = n <+> lArrow <+> text "fmapM" <+> f <+> n <> semicolon g (Tuple ts,n) = n <+> lArrow <+> (parens $ text "do" <+> tuple (varNames ts) <+> lArrow <+> text "return" <+> n <> semicolon <+> hcat (map g (zip ts (varNames ts))) <> text "return" <+> tuple (varNames ts)) <> semicolon g _ = empty userRuleTraversable D{name = name, vars = [] } = text "--" <+> text name <> text ": Cannot derive Traversable without type variables" userRuleTraversable D{name = name, vars = vars, body=body } = ins where (tt:rt') = reverse vars rt = reverse rt' fn = if null rt then text name else parens (text name <+> hsep (map text rt)) ins = text "instance" <+> text "Data.Traversable.Traversable" <+> fn <+> text "where" $$ block fs fs = map f' $ body combine xs = if null xs then empty else text "<$>" <+> hsep (intersperse (text "<*>") xs) f' Body{constructor=constructor, types=types} = text "traverse" <+> f <+> pattern constructor types <+> equals <+> text constructor <+> combine (map g (zip types vnt)) where vnt = varNames types g (t,n) | not (hasVar tt t) = text "Control.Applicative.pure" <+> n g (Var t,n) | t == tt = f <+> n g (List (Var t),n) | t == tt = text "traverse" <+> f <+> n g (List t,n) = text "traverse" <+> lf t <+> n where lf t = parens $ text "\\x ->" <+> g (t,x) g (LApply t [],n) = g (t,n) g (LApply t ts,n) | last ts == Var tt = text "traverse" <+> f <+> n -- g (Tuple ts,n) = (parens $ tuple (varNames ts) <+> lArrow <+> text "return" <+> n <> semicolon <+> hcat (map g (zip ts (varNames ts))) <> text "return" <+> tuple (varNames ts)) <> semicolon g (Tuple ts,n) = parens $ text "case" <+> n <+> text "of" 
<+> tuple (varNames ts) <+> rArrow <+> text ("(" ++ replicate (length ts - 1) ',' ++ ")") <+> combine (map g (zip ts (varNames ts))) g _ = empty DrIFT-2.2.3/src/Rules/Arbitrary.hs0000644000076400007640000000402010753606147013540 00000000000000module Rules.Arbitrary(rules) where import List import RuleUtils rules = [ ("Arbitrary", userRuleArbitrary, "Debugging", "Derive reasonable Arbitrary for QuickCheck", Nothing) ] {- datatype that rules manipulate :- data Data = D { name :: Name, -- type's name constraints :: [(Class,Var)], vars :: [Var], -- Parameters body :: [Body], derives :: [Class], -- derived classes statement :: Statement} -- type of statement | Directive --| | TypeName Name --| used by derive (ignore) deriving (Eq,Show) data Body = Body { constructor :: Constructor, labels :: [Name], -- [] for a non-record datatype. types :: [Type]} deriving (Eq,Show) data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) type Name = String type Var = String type Class = String type Constructor = String type Rule = (Tag, Data->Doc) -} -- useful helper things instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] -- begin here for Arbitrary derivation userRuleArbitrary dat@D{name = name, vars = vars, body = body } = ins where ins = instanceheader "Arbitrary" dat $$ block [arb, coarb] arb :: Doc arb = text "arbitrary" <+> equals <+> text "do" <+> vcat [text ("x <- choose (1,"++show (length body)++")"), text "case x of" $$ vcat alts] alts= zipWith alt [1..] body alt k (Body cons _ tys) = let vs = zipWith (\k _ -> "v"++show k) [1..] tys in text (" "++show k++" -> do ") <+> vcat ((map (\v -> text (v++" <- arbitrary")) vs) ++ [text ("return ("++cons++" "++concat (intersperse " " vs)++")")]) coarb = text "coarbitrary = error \"coarbitrary not yet supported\"" DrIFT-2.2.3/src/Rules/Xml.hs0000644000076400007640000003341010753606147012346 00000000000000-- expanded from stub module to add new rules. module Rules.Xml(rules) where import List (nub,sortBy) import RuleUtils -- useful to have a look at this too rules :: [RuleDef] rules = [ ("Haskell2Xml", userRuleXmlOld, "Representation" , "encode terms as XML (HaXml<=1.13)", Nothing) , ("XmlContent", userRuleXmlNew, "Representation" , "encode terms as XML (HaXml>=1.14)", Nothing) , ("Parse", userRuleTextParse, "Utility" , "parse values back from standard 'Show'" , Just "Generates the Parse class supplied in\ \ module Text.ParserCombinators.TextParser\ \ as part of HaXml>=1.14. This represents\ \ a replacement for the Prelude.Read class,\ \ with better error messages.") ] {- datatype that rules manipulate :- data Data = D { name :: Name, -- type's name constraints :: [(Class,Var)], vars :: [Var], -- Parameters body :: [Body], derives :: [Class], -- derived classes statement :: Statement} -- type of statement | Directive --| | TypeName Name --| used by derive (ignore) deriving (Eq,Show) data Body = Body { constructor :: Constructor, labels :: [Name], -- [] for a non-record datatype. 
types :: [Type]} deriving (Eq,Show) data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) type Name = String type Var = String type Class = String type Constructor = String type Rule = (Tag, Data->Doc) -} userRuleXmlOld dat = let cs = body dat -- constructors cvs = mknss cs namesupply -- variables in instanceheader "Haskell2Xml" dat $$ block (toHTfn cs cvs dat : ( text "fromContents (CElem (Elem constr [] cs):etc)" $$ vcat (preorder cs (zipWith readsfn cvs cs))) : zipWith3 showsfn [0..] cvs cs) userRuleXmlNew dat = let cs = body dat -- constructors cvs = mknss cs namesupply -- variables in instanceheader "HTypeable" dat $$ block [toHTfn cs cvs dat] $$ instanceheader "XmlContent" dat $$ block ( case cs of [c] -> text "parseContents = do" $$ nest 4 (text "{ inElementWith (flip isPrefixOf)" <+> text (show (constructor c)) <+> text "$" $$ parseFn True (head cvs) c $$ text "}" ) _ -> text "parseContents = do" $$ nest 4 (text "{ e@(Elem t _ _) <- elementWith (flip isPrefixOf)" <+> text (show (preorder cs (map constructor cs))) $$ text "; case t of" $$ nest 2 (text "_" $$ nest 2 (vcat (preorder cs (zipWith (parseFn False) cvs cs)))) $$ text "}" ) : zipWith3 showsfn [0..] cvs cs) toHTfn cs cvs dat = let typ = name dat fvs = vars dat pats = concat (zipWith mkpat cvs cs) in text "toHType v =" $$ nest 4 ( text "Defined" <+> fsep [ text "\"" <> text typ <> text "\"" , bracketList (map text fvs) , bracketList (zipWith toConstr cvs cs) ] ) $$ if null pats then empty else nest 2 (text "where") $$ nest 4 (vcat (map (<+> text "= v") pats)) $$ nest 4 (vcat (map (simplest typ (zip cvs cs)) fvs)) namesupply = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 'Z']] mknss [] _ = [] mknss (c:cs) ns = let (thisns,rest) = splitAt (length (types c)) ns in thisns: mknss cs rest mkpat ns c = if null ns then [] else [mypattern (constructor c) (types c) ns] toConstr :: [Doc] -> Body -> Doc toConstr ns c = let cn = constructor c ts = types c fvs = nub (concatMap deepvars ts) in text "Constr" <+> fsep [ text "\"" <> text cn <> text "\"" , bracketList (map text fvs) , bracketList (map (\v-> text "toHType" <+> v) ns) ] where deepvars (Arrow t1 t2) = [] --deepvars (Apply t1 t2) = deepvars t1 ++ deepvars t2 deepvars (LApply c ts) = concatMap deepvars ts deepvars (Var s) = [s] deepvars (Con s) = [] deepvars (Tuple ts) = concatMap deepvars ts deepvars (List t) = deepvars t --first [] fv = error ("cannot locate free type variable "++fv) --first ((ns,c):cs) fv = -- let npats = [ (n,pat) | (n,t) <- zip ns (types c) -- , (True,pat) <- [ find fv t ] -- ] -- in -- if null npats then -- first cs fv -- else let (n,pat) = head npats -- in parens pat <+> text "= toHType" <+> n -- -- where -- -- find :: String -> Type -> (Bool,Doc) -- find v (Arrow t1 t2) = (False,error "can't ShowXML for arrow type") -- find v (Apply t1 t2) = let (tf1,pat1) = find v t1 -- (tf2,pat2) = find v t2 -- in perhaps (tf1 || tf2) -- (pat1 <+> snd (perhaps tf2 pat2)) -- find v (LApply c ts) = let (_,cpat) = find v c -- tfpats = map (find v) ts -- (tfs,pats) = unzip tfpats -- in perhaps (or tfs) -- (parens (cpat <+> -- bracketList (map (snd.uncurry perhaps) tfpats))) -- find v (Var s) = perhaps (v==s) (text v) -- find v (Con s) = (False, text "Defined" <+> -- text "\"" <> text s <> text "\"") -- find v (Tuple ts) = let tfpats = map (find v) ts -- (tfs,pats) = unzip tfpats -- in perhaps (or tfs) -- (parens (text "Tuple" <+> -- bracketList (map (snd.uncurry perhaps) tfpats))) -- find v (List t) = let (tf,pat) = find v t -- in perhaps tf (parens 
(text "List" <+> pat)) -- perhaps tf doc = if tf then (True,doc) else (False,text "_") simplest typ cs fv = let npats = [ (depth,(n,pat)) | (ns,c) <- cs , (n,t) <- zip ns (types c) , (depth, pat) <- [ find fv t ] ] (_,(n,pat)) = foldl closest (Nothing,error "free tyvar not found") npats in parens pat <+> text "= toHType" <+> n where find :: String -> Type -> (Maybe Int,Doc) find v (Arrow t1 t2) = (Nothing,error "can't derive Haskell2Xml/HTypeable for arrow type") -- find v (Apply t1 t2) = let (d1,pat1) = find v t1 -- (d2,pat2) = find v t2 -- in perhaps (combine [d1,d2]) -- (pat1 <+> snd (perhaps d2 pat2)) find v (LApply c ts) | c == (Con typ) = (Nothing, text "_") | otherwise = let (_,cpat) = find v c dpats = map (find v) ts (ds,pats) = unzip dpats in perhaps (combine ds) (cpat <+> bracketList (map (snd.uncurry perhaps) dpats) <+> text "_") find v (Var s) = perhaps (if v==s then Just 0 else Nothing) (text v) find v (Con s) = (Nothing, text "Defined" <+> text "\"" <> text s <> text "\"") find v (Tuple ts) = let dpats = map (find v) ts (ds,pats) = unzip dpats in perhaps (combine ds) (text "Tuple" <+> bracketList (map (snd.uncurry perhaps) dpats)) find v (List t) = let (d,pat) = find v t in perhaps (inc d) (text "List" <+> parens pat) perhaps Nothing doc = (Nothing, text "_") perhaps jn doc = (jn,doc) combine ds = let js = [ n | (Just n) <- ds ] in if null js then Nothing else inc (Just (minimum js)) inc Nothing = Nothing inc (Just n) = Just (n+1) closest :: (Maybe Int,a) -> (Maybe Int,a) -> (Maybe Int,a) closest (Nothing,_) b@(Just _,_) = b closest a@(Just n,_) b@(Just m,_) | n< m = a | m<=n = b closest a b = a -- showsfn (n = index) (ns = variables) (cn = constructor body) showsfn n ns cn = let cons = constructor cn typ = types cn sc = parens (text "showConstr" <+> text (show n) <+> parens (text "toHType" <+> text "v")) cfn [] = text "[]" cfn [x] = parens (text "toContents" <+> x) cfn xs = parens (text "concat" <+> bracketList (map (text "toContents" <+>) xs)) in text "toContents" <+> text "v@" <> mypattern cons typ ns <+> text "=" $$ nest 4 (text "[mkElemC" <+> sc <+> cfn ns <> text "]") ---- -- text "fromContents (CElem (Elem constr [] cs):etc)" $$ ---- -- readsfn (ns = variables) (cn = constructor body) readsfn ns cn = let cons = text (constructor cn) typ = types cn num = length ns - 1 str d = text "\"" <> d <> text "\"" trails = take num (map text [ ['c','s',y,z] | y <- ['0'..'9'] , z <- ['0'..'9'] ]) cfn x = parens (text "fromContents" <+> x) (init,[last]) = splitAt num ns something = parens ( text "\\" <> parenList [last, text "_"] <> text "->" <+> parens (cons <+> hsep ns <> text "," <+> text "etc") ) mkLambda (n,cv) z = parens ( text "\\" <> parenList [n,cv] <> text "->" <+> fsep [z, cfn cv] ) in nest 4 ( text "|" <+> str cons <+> text "`isPrefixOf` constr =" $$ nest 4 ( if null ns then parenList [cons, text "etc"] else fsep [ foldr mkLambda something (zip init trails) , cfn (text "cs")] ) ) -- Constructors are matched with "isPrefixOf" rather than "==" -- because of parametric polymorphism. For a datatype -- data A x = A | B x -- the XML tags will be , , , etc. -- However prefix-matching presents a problem for types like -- data C = C | CD -- because (C `isPrefixOf`) matches both constructors. The solution -- (implemented by "preorder") is to order the constructors such that -- is matched before . preorder cs = map snd . reverse . sortBy (\(a,_) (b,_)-> compare a b) . 
zip (map constructor cs) -- parseFn (ns = variables) (cn = constructor body) parseFn single ns cn = let cons = constructor cn arity = length (types cn) var v = text ";" <+> v <+> text "<- parseContents" intro = if single then empty else text "|" <+> text (show cons) <+> text "`isPrefixOf` t -> interior e $" in case arity of 0 -> intro <+> nest 8 (text "return" <+> text cons) 1 -> intro <+> nest 8 (text "fmap" <+> text cons <+> text "parseContents") _ -> intro $$ nest 8 (text "return" <+> text cons <+> (fsep (replicate arity (text "`apply` parseContents")))) -- instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] mypattern :: Constructor -> [a] -> [Doc] -> Doc mypattern c l ns = if null l then text c else parens (hsep (text c : take (length l) ns)) -- ----------------------------------------------------------------------- -- userRuleTextParse dat = let cs = body dat -- constructors cvs = mknss cs namesupply -- variables isNullary c = null (types c) in instanceheader "Parse" dat $$ nest 4 ( case cs of [] -> empty _ | all isNullary cs -> text "parse = enumeration" <+> text (show (name dat)) <+> text "[" <+> fsep ( text (constructor (head cs)) : map (\c-> text "," <+> text (constructor c)) (tail cs)) <+> text "]" | otherwise -> text "parse = constructors" $$ nest 4 (text "[" <+> textParseFn (head cvs) (head cs) $$ vcat (zipWith (\cv c-> text "," <+> textParseFn cv c) (tail cvs) (tail cs)) $$ text "]" ) ) -- textParseFn (ns = variables) (cn = constructor body) textParseFn ns cn = let cons = constructor cn arity = length (types cn) fields = labels cn doField f = text "`discard` isWord \",\" `apply` field" <+> text (show f) in fsep ( text "(" <+> text (show cons) : text "," <+> nest 2 (case arity of 0 -> text "return" <+> text cons 1 | null fields -> text "fmap" <+> text cons <+> text "parse" _ | null fields -> text "return" <+> text cons <+> (fsep (replicate arity (text "`apply` parse"))) | otherwise -> text "return" <+> text cons <+> fsep ( text "`discard` isWord \"{\" `apply` field" <+> text (show (head fields)) : map doField (tail fields) ++ [text "`discard` isWord \"}\""] ) ) : text ")" : []) -- ----------------------------------------------------------------------- -- DrIFT-2.2.3/src/Rules/Binary.hs0000644000076400007640000000455310753606147013040 00000000000000module Rules.Binary(rules) where import List (nub,intersperse) import RuleUtils rules = [ ("Binary", userRuleBinary, "Binary", "Data.Binary binary encoding of terms", Nothing) ] -- useful helper things namesupply = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 'Z']] mknss [] _ = [] mknss (c:cs) ns = let (thisns,rest) = splitAt (length (types c)) ns in thisns: mknss cs rest mkpattern :: Constructor -> [a] -> [Doc] -> Doc mkpattern c l ns = if null l then text c else parens (hsep (text c : take (length l) ns)) instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] -- begin here for Binary derivation userRuleBinary dat = let cs = body dat cvs = mknss cs namesupply --k = (ceiling . logBase 256 . realToFrac . 
length) cs k = length cs in instanceheader "Data.Binary.Binary" dat $$ block ( zipWith3 (putfn k) [0..] cvs cs ++ [getfn k [0..] cvs cs] ) putfn 1 _ [] c = text "put" <+> ppCons [] c <+> text "= return ()" putfn 1 _ cv c = text "put" <+> ppCons cv c <+> text "= do" $$ nest 8 ( vcat (map (text "Data.Binary.put" <+>) cv) ) putfn _ n cv c = text "put" <+> ppCons cv c <+> text "= do" $$ nest 8 ( text "Data.Binary.putWord8" <+> text (show n) $$ vcat (map (text "Data.Binary.put" <+>) cv) ) ppCons cv c = mkpattern (constructor c) (types c) cv getfn _ _ [[]] [c] = text "get = return" <+> ppCons [] c getfn _ _ [vs] [c] = text "get = do" $$ vcat (map (\v-> v <+> text "<-" <+> text "get") vs) $$ text "return" <+> ppCons vs c getfn _ ns cvs cs = text "get = do" $$ nest 8 ( text "h <- Data.Binary.getWord8" $$ text "case h of" $$ nest 2 ( vcat $ zipWith3 (\n vs c-> text (show n) <+> text "-> do" $$ nest 6 ( vcat (map (\v-> v <+> text "<-" <+> text "Data.Binary.get") vs) $$ text "return" <+> ppCons vs c )) ns cvs cs ++ [ text "_ -> fail \"invalid binary data found\"" ] ) ) DrIFT-2.2.3/src/Rules/GhcBinary.hs0000644000076400007640000000636310753606147013463 00000000000000-- stub module to add your own rules. module Rules.GhcBinary (rules) where import List (nub,intersperse) import RuleUtils -- useful to have a look at this too rules = [ ("GhcBinary", userRuleGhcBinary, "Binary", "byte oriented binary encoding compatable withoriented binary encoding compatable with oriented binary encoding compatable with older binary libraries", Nothing) ] {- datatype that rules manipulate :- data Data = D { name :: Name, -- type's name constraints :: [(Class,Var)], vars :: [Var], -- Parameters body :: [Body], derives :: [Class], -- derived classes statement :: Statement} -- type of statement | Directive --| | TypeName Name --| used by derive (ignore) deriving (Eq,Show) data Body = Body { constructor :: Constructor, labels :: [Name], -- [] for a non-record datatype. types :: [Type]} deriving (Eq,Show) data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) type Name = String type Var = String type Class = String type Constructor = String type Rule = (Tag, Data->Doc) -} -- useful helper things namesupply = [text [x,y] | x <- ['a' .. 'z'], y <- ['a' .. 'z'] ++ ['A' .. 'Z']] mknss [] _ = [] mknss (c:cs) ns = let (thisns,rest) = splitAt (length (types c)) ns in thisns: mknss cs rest mkpattern :: Constructor -> [a] -> [Doc] -> Doc mkpattern c l ns = if null l then text c else parens (hsep (text c : take (length l) ns)) instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] -- begin here for Binary derivation userRuleGhcBinary dat = let cs = body dat cvs = mknss cs namesupply --k = (ceiling . logBase 256 . realToFrac . length) cs k = length cs in instanceheader "Binary" dat $$ block ( zipWith3 (putfn k) [0..] cvs cs ++ [getfn k [0..] 
cvs cs] ) putfn 1 _ [] c = text "put_ _" <+> ppCons [] c <+> text "= return ()" putfn 1 _ cv c = text "put_ bh" <+> ppCons cv c <+> text "= do" $$ nest 8 ( vcat (map (text "put_ bh" <+>) cv) ) putfn _ n cv c = text "put_ bh" <+> ppCons cv c <+> text "= do" $$ nest 8 ( text "putByte bh" <+> text (show n) $$ vcat (map (text "put_ bh" <+>) cv) -- $$ --text "return pos" ) ppCons cv c = mkpattern (constructor c) (types c) cv getfn _ _ [[]] [c] = text "return" <+> ppCons [] c getfn _ _ [vs] [c] = text "get bh = do" $$ vcat (map (\v-> v <+> text "<-" <+> text "get bh") vs) $$ text "return" <+> ppCons vs c getfn _ ns cvs cs = text "get bh = do" $$ nest 8 ( text "h <- getByte bh" $$ text "case h of" $$ nest 2 ( vcat $ zipWith3 (\n vs c-> text (show n) <+> text "-> do" $$ nest 6 ( vcat (map (\v-> v <+> text "<-" <+> text "get bh") vs) $$ text "return" <+> ppCons vs c )) ns cvs cs ++ [ text "_ -> fail \"invalid binary data found\"" ] ) ) -- end of binary derivation DrIFT-2.2.3/src/Rules/Monoid.hs0000644000076400007640000000404510753606147013035 00000000000000-- stub module to add your own rules. module Rules.Monoid (rules) where import List import RuleUtils rules = [ ("Monoid", userRuleMonoid, "Generics", "derive reasonable Data.Monoid implementation", Nothing) ] {- datatype that rules manipulate :- data Data = D { name :: Name, -- type's name constraints :: [(Class,Var)], vars :: [Var], -- Parameters body :: [Body], derives :: [Class], -- derived classes statement :: Statement} -- type of statement | Directive --| | TypeName Name --| used by derive (ignore) deriving (Eq,Show) data Body = Body { constructor :: Constructor, labels :: [Name], -- [] for a non-record datatype. types :: [Type]} deriving (Eq,Show) data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) type Name = String type Var = String type Class = String type Constructor = String type Rule = (Tag, Data->Doc) -} -- useful helper things mkpattern :: Constructor -> [Doc] -> Doc mkpattern c ns = if null ns then text c else parens (hsep (text c : ns)) instanceheader cls dat = let fv = vars dat tycon = name dat ctx = map (\v-> text cls <+> text v) parenSpace = parens . hcat . 
sepWith space in hsep [ text "instance" , opt fv (\v -> parenList (ctx v) <+> text "=>") , text cls , opt1 (texts (tycon: fv)) parenSpace id , text "where" ] -- begin here for Binary derivation userRuleMonoid dat@D{name = name, vars = vars, body=[body] } = ins where ins = instanceheader "Monoid" dat $$ block [me, ma] me, ma :: Doc me = text "mempty" <+> equals <+> text (constructor body) <+> hsep (replicate lt (text "mempty")) ma = text "mappend" <+> mkpattern c (varNames ty) <+> mkpattern c (varNames' ty) <+> equals <+> text c <+> hcat (zipWith f (varNames ty) (varNames' ty)) f a b = parens $ text "mappend" <+> a <+> b c = constructor body ty = types body lt = length (types body) userRuleMonoid D{name = name } = text "--" <+> text name <> text ": Cannot derive Monoid from type" DrIFT-2.2.3/src/Rules/Standard.hs0000644000076400007640000003427610753606147013361 00000000000000module Rules.Standard(rules) where import RuleUtils import List import GenUtil --- Add Rules Below Here ---------------------------------------------------- rules :: [RuleDef] rules = [("test",dattest, "Utility", "output raw data for testing", Nothing), ("update",updatefn, "Utility","for label 'foo' provides 'foo_u' to update it and foo_s to set it", Nothing ), ("is",isfn, "Utility", "provides isFoo for each constructor", Nothing), ("get",getfn, "Utility", "for label 'foo' provide foo_g to get it", Nothing), ("from",fromfn, "Utility", "provides fromFoo for each constructor", Nothing), ("has",hasfn, "Utility", "hasfoo for record types", Nothing), ("un",unfn, "Utility", "provides unFoo for unary constructors", Nothing), ("NFData",nffn, "General","provides 'rnf' to reduce to normal form (deepSeq)", Nothing ), ("Eq",eqfn, "Prelude","", Nothing), ("Ord",ordfn, "Prelude", "", Nothing), ("Enum",enumfn, "Prelude", "", Nothing), ("Show",showfn, "Prelude", "", Nothing), ("Read",readfn, "Prelude", "", Nothing), ("Bounded",boundedfn, "Prelude", "", Nothing)] ----------------------------------------------------------------------------- -- NFData - This class provides 'rnf' to reduce to normal form. -- This has a default for non-constructed datatypes -- Assume that base cases have been defined for lists, functions, and -- (arbitrary) tuples - makeRnf produces a function which applies rnf to -- each of the combined types in each constructor of the datatype. (If -- this isn't very clear, just look at the code to figure out what happens) nffn = instanceSkeleton "NFData" [(makeRnf,empty)] makeRnf :: IFunction makeRnf (Body{constructor=constructor,types=types}) | null types = text "rnf" <+> fsep [pattern constructor [],equals,text "()"] | otherwise = let vars = varNames types head = [pattern constructor vars, equals] body = sepWith (text "`seq`") . map (text "rnf" <+>) $ vars in text "rnf" <+> fsep (head ++ body) ----------------------------------------------------------------------------- -- Forming 'update' functions for each label in a record -- -- for a datatype G, where label has type G -> a -- the corresponding update fn has type (a -> a) -> G -> G -- The update fn has the same name as the label with _u appended -- an example of what we want to generate -- --> foo_u f d{foo}=d{foo = f foo} -- -- labels can be common to more than one constructor in a type. -- this -- is a problem, and the reason why a sort is used. 
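-- A worked illustration (hypothetical type, not from the DrIFT sources) of
-- the shared-label case described above.  Given
--     data T = A {foo :: Int, bar :: Char} | B {bar :: Char}
-- the rule below generates roughly:
--     foo_u f r@A{foo = x} = r{foo = f x}
--     foo_u _ x = x                 -- foo is absent from B, so pass through
--     bar_u f r@A{bar = x} = r{bar = f x}
--     bar_u f r@B{bar = x} = r{bar = f x}
--     foo_s v = foo_u (const v)
--     bar_s v = bar_u (const v)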
updatefn :: Data -> Doc updatefn d@(D{body=body,name=name}) | hasRecord d = vcat (updates ++ sets) | otherwise = commentLine $ text "Warning - can't derive `update' functions for non-record type: " <+> text name where nc = length body labs = gf $ sort . concatMap f $ body updates = map genup labs -- $$ hsep [text (n ++ "_u"), char '_', char 'x', equals, char 'x'] sets = map genset . nub . map fst $ labs f :: Body -> [(Name,Constructor)] f (Body{constructor=constructor,labels=labels}) = zip (filter (not . null) labels ) (repeat constructor) gf ts = map (\ts -> (fst (head ts), snds ts)) (groupBy (\(a,_) (b,_) -> a == b) (sort ts)) genup :: (Name,[Constructor]) -> Doc genup (n,cs) = vcat (map up cs) $$ up' where up c = hsep [text (n ++ "_u") , char 'f' , char 'r' <> char '@' <> text c <> braces (text n <+> text " = x") , equals , char 'r' <> braces (hsep [text n, text "= f x"])] up' | nc > length cs = hsep [text (n ++ "_u"), char '_', char 'x', equals, char 'x'] | otherwise = empty -- while we're at it, may as well define a set function too... genset :: Name -> Doc genset n = hsep [text (n ++ "_s v = "), text (n ++ "_u"), text " (const v)"] getfn :: Data -> Doc getfn d@(D{body=body,name=name}) | hasRecord d = vcat (updates ++ sets) | otherwise = commentLine $ text "Warning - can't derive `get' functions for non-record type: " <+> text name where nc = length body labs = gf $ sort . concatMap f $ body updates = map genup labs sets = map genset . nub . map fst $ labs f :: Body -> [(Name,Constructor)] f (Body{constructor=constructor,labels=labels}) = zip (filter (not . null) labels ) (repeat constructor) gf ts = map (\ts -> (fst (head ts), snds ts)) (groupBy (\(a,_) (b,_) -> a == b) (sort ts)) genup :: (Name,[Constructor]) -> Doc genup (n,cs) = vcat (map up cs) $$ up' where fn = n ++ "_g" up c = hsep [text fn , char 'r' <> char '@' <> text c <> braces (text n <+> text " = x") , equals , text "return x"] up' | nc > length cs = hsep [text fn, char '_', equals, text "fail", tshow fn] | otherwise = empty -- while we're at it, may as well define a set function too... genset :: Name -> Doc genset n = hsep [text (n ++ "_s v = "), text (n ++ "_u"), text " (const v)"] ---------------------------------------------------------------------- -- Similar rules to provide predicates for the presence of a constructor / label isfn :: Data -> Doc isfn (D{body=body}) = vcat (map is body) where is Body{constructor=constructor,types=types} = let fnName = text ("is" ++ constructor) fn = fnName <+> hsep [pattern_ constructor types,text "=",text "True"] defaultFn = fnName <+> hsep (texts ["_","=","False"]) in fn $$ defaultFn fromfn :: Data -> Doc fromfn (D{body=body}) = vcat (map from body) where from Body{constructor=constructor,types=types} = fn $$ defaultFn where fnName = ("from" ++ constructor) fnName' = text fnName fn = fnName' <+> hsep [pattern constructor types,text "=",text "return", tuple (varNames types) ] defaultFn = fnName' <+> hsep (texts ["_","=","fail",show fnName ]) hasfn :: Data -> Doc hasfn d@(D{body=body,name=name}) | hasRecord d = vcat [has l b | l <- labs, b <- body] | otherwise = commentLine $ text "Warning - can't derive `has' functions for non-record type:" <+> text name where has lab Body{constructor=constructor,labels=labels} = let bool = text . show $ lab `elem` labels pattern = text (constructor ++ "{}") fnName = text ( "has" ++ lab) in fsep[fnName, pattern, text "=", bool] labs = nub . concatMap (labels) $ body -- Function to make using newtypes a bit nicer. 
-- for newtype N = T a , unN :: T -> a unfn :: Data -> Doc unfn (D{body=body,name=name,statement=statement}) | statement == DataStmt = commentLine $ text "Warning - can't derive 'un' function for data declaration " <+> text name | otherwise = let fnName = text ("un" ++ name) b = head body pattern = parens $ text (constructor b) <+> text "a" in fsep [fnName,pattern, equals, text "a"] ----------------------------------------------------------------------------- -- A test rule for newtypes datastructures - just outputs -- parsed information. Can put {-! global : Test !-} in an input file, and output -- from the entire file should be generated. dattest d = commentBlock . vcat $ [text (name d) , fsep . texts . map show $ constraints d , fsep . texts . map show $ vars d , fsep . texts . map show $ body d , fsep . texts . map show $ derives d , text . show $statement d] ------------------------------------------------------------------------------ -- Rules for the derivable Prelude Classes -- Eq eqfn = instanceSkeleton "Eq" [(makeEq,defaultEq)] makeEq :: IFunction makeEq (Body{constructor=constructor,types=types}) | null types = hsep $ texts [constructor,"==",constructor, "=", "True"] | otherwise = let v = varNames types v' = varNames' types d x = parens . hsep $ text constructor : x head = [ text "==", d v', text "="] body = sepWith (text "&&") $ zipWith (\x y -> (x <+> text "==" <+> y)) v v' in d v <+> fsep (head ++ body) defaultEq = hsep $ texts ["_", "==", "_", "=" ,"False"] ---------------------------------------------------------------------- -- Ord ordfn d = let ifn = [f c c' | c <- zip (body d) [1 ..] , c' <- zip (body d) [1 ..]] cmp n n' = show $ compare n n' f (b,n) (b',n') | null (types b) = text "compare" <+> fsep [text (constructor b), pattern (constructor b') (types b') , char '=', text $ cmp n n' ] | otherwise = let head = fsep [l,r, char '='] l = pattern (constructor b) (types b) r = pattern' (constructor b') (types b') one x y = fsep [text "compare",x,y] list [x] [y] = one x y list xs ys = fsep [text "foldl", parens fn, text "EQ", bracketList (zipWith one xs ys)] fn = fsep $ texts ["\\x y", "->", "if", "x", "==","EQ", "then", "compare", "y", "EQ", "else", "x"] in if constructor b == constructor b' then text "compare" <+> fsep [head, list (varNames $ types b) (varNames' $ types b')] else text "compare" <+> fsep [head,text (cmp n n')] in simpleInstance "Ord" d <+> text "where" $$ block ifn ---------------------------------------------------------------------- -- Show & Read -- won't work for infix constructors -- (and anyway, neither does the parser currently) -- -- Show showfn = instanceSkeleton "Show" [(makeShow,empty)] makeShow :: IFunction makeShow (Body{constructor=constructor,labels=labels,types=types}) | null types = fnName <+> fsep [headfn,showString constructor] | null labels = fnName <+> fsep [headfn,bodyStart, body] -- datatype | otherwise = fnName <+> fsep[headfn,bodyStart,recordBody] -- record where fnName = text "showsPrec" headfn = fsep [char 'd',(pattern constructor types),equals] bodyStart = fsep [text "showParen",parens (text "d >= 10")] body = parens . 
fsep $ sepWith s (c : b) recordBody = parens $ fsep [c,comp,showChar '{',comp, fsep (sepWith s' b'),comp,showChar '}'] c = showString constructor b = map (\x -> fsep[text "showsPrec", text "10", x]) (varNames types) b' = zipWith (\x l -> fsep[showString l,comp,showChar '=',comp,x]) b labels s = fsep [comp,showChar ' ', comp] s' = fsep [comp,showChar ',',comp] showChar c = fsep [text "showChar", text ('\'':c:"\'")] showString s = fsep[ text "showString", doubleQuotes $ text s] comp = char '.' -- Read readfn d = simpleInstance "Read" d <+> text "where" $$ readsPrecFn d readsPrecFn d = let fnName = text "readsPrec" bodies = vcat $ sepWith (text "++") (map makeRead (body d)) in nest 4 $ fnName <+> fsep[char 'd', text "input", equals,bodies] makeRead :: IFunction makeRead (Body{constructor=constructor,labels=labels,types=types}) | null types = fsep [read0,text "input"] | null labels = fsep [headfn,read,text "input"] | otherwise = fsep [headfn,readRecord, text "input"] where headfn = fsep [text "readParen", parens (text "d > 9")] read0 = lambda $ listComp (result rest) [lexConstr rest] read = lambda . listComp (result rest) $ lexConstr ip : ( map f (init vars) ) ++ final (last vars) f v = fsep [tup v ip, from,readsPrec, ip] final v = [fsep[tup v rest,from,readsPrec,ip]] readRecord = let f lab v = [ fsep [tup (text $ show lab) ip,lex], fsep [tup (text $ show "=") ip,lex], fsep [tup v ip ,from,readsPrec,ip]] openB = fsep [tup (text $ show "{") ip,lex] closeB = fsep [tup (text $ show "}") rest,lex] comma = [fsep [tup (text $ show ",") ip,lex]] in lambda . listComp (result rest) $ lexConstr ip : openB : (concat . sepWith comma) (zipWith f labels vars) ++ [closeB] lambda x = parens ( fsep [text "\\",ip,text "->",x]) listComp x (l:ll) = brackets . fsep . sepWith comma $ ((fsep[x, char '|', l]) : ll) result x = tup (pattern constructor vars) x lexConstr x = fsep [tup (text $ show constructor) x, lex] -- nifty little bits of syntax vars = varNames types ip = text "inp" rest = text "rest" tup x y = parens $ fsep [x, char ',',y] lex = fsep[from,text "lex",ip] readsPrec = fsep [text "readsPrec",text "10"] from = text "<-" ---------------------------------------------------------------------- -- Enum -- a lot of this code should be provided as default instances, -- but currently isn't enumfn d = let fromE = fromEnumFn d toE = toEnumFn d eFrom = enumFromFn d in if any (not . null . types) (body d) then commentLine $ text "Warning -- can't derive Enum for" <+> text (name d) else simpleInstance "Enum" d <+> text "where" $$ block (fromE ++ toE ++ [eFrom,enumFromThenFn]) fromEnumFn :: Data -> [Doc] fromEnumFn (D{body=body}) = map f (zip body [0 ..]) where f (Body{constructor=constructor},n) = text "fromEnum" <+> (fsep $ texts [constructor , "=", show n]) toEnumFn :: Data -> [Doc] toEnumFn (D{body=body}) = map f (zip body [0 ..]) where f (Body{constructor=constructor},n) = text "toEnum" <+> (fsep $ texts [show n , "=", constructor]) enumFromFn :: Data -> Doc enumFromFn D{body=body} = let conList = bracketList . texts . 
map constructor $ body bodydoc = fsep [char 'e', char '=', text "drop", parens (text "fromEnum" <+> char 'e'), conList] in text "enumFrom" <+> bodydoc enumFromThenFn :: Doc enumFromThenFn = let wrapper = fsep $ texts ["i","j","=","enumFromThen\'","i","j","(", "enumFrom", "i", ")"] eq1 = text "enumFromThen\'" <+> fsep (texts ["_","_","[]","=","[]"]) eq2 = text "enumFromThen\'" <+> fsep ( texts ["i","j","(x:xs)","=", "let","d","=","fromEnum","j","-","fromEnum","i","in", "x",":","enumFromThen\'","i","j","(","drop","(d-1)","xs",")"]) in text "enumFromThen" <+> wrapper $$ block [text "where",eq1,eq2] ---------------------------------------------------------------------- -- Bounded - as if anyone uses this one :-) .. boundedfn d@D{name=name,body=body,derives=derives} | all (null . types) body = boundedEnum d | singleton body = boundedSingle d | otherwise = commentLine $ text "Warning -- can't derive Bounded for" <+> text name boundedEnum d@D{body=body} = let f = constructor . head $ body l = constructor . last $ body in simpleInstance "Bounded" d <+> text "where" $$ block [ hsep (texts[ "minBound","=",f]), hsep (texts[ "maxBound","=",l])] boundedSingle d@D{body=body} = let f = head $ body in simpleInstance "Bounded" d <+> text "where" $$ block [ hsep . texts $ [ "minBound","=",constructor f] ++ replicate (length (types f)) "minBound", hsep . texts $ [ "maxBound","=",constructor f] ++ replicate (length (types f)) "maxBound"] singleton [x] = True singleton _ = False DrIFT-2.2.3/src/Rules.hs0000644000076400007640000000116410753606664011613 00000000000000-- Generated by collect_rules.sh module Rules(rules) where import qualified Rules.Arbitrary import qualified Rules.Binary import qualified Rules.BitsBinary import qualified Rules.FunctorM import qualified Rules.Generic import qualified Rules.GhcBinary import qualified Rules.Monoid import qualified Rules.Standard import qualified Rules.Utility import qualified Rules.Xml rules = concat [Rules.Arbitrary.rules ,Rules.Binary.rules ,Rules.BitsBinary.rules ,Rules.FunctorM.rules ,Rules.Generic.rules ,Rules.GhcBinary.rules ,Rules.Monoid.rules ,Rules.Standard.rules ,Rules.Utility.rules ,Rules.Xml.rules ] DrIFT-2.2.3/src/DrIFT.hs0000644000076400007640000001320710753606147011426 00000000000000-- Based on DrIFT 1.0 by Noel Winstanley -- hacked for Haskell 98 by Malcolm Wallace, University of York, Feb 1999. 
-- modified by various people, now maintained by John Meacham module Main(main) where import ChaseImports import DataP import GenUtil import GetOpt import Char import IO hiding(try) import List (partition,isSuffixOf,sort, groupBy, sortBy) import Monad(unless) import PreludData(preludeData) import Pretty import RuleUtils (commentLine,texts) import RuleUtils(Rule,Tag) import Version import qualified Rules(rules) import qualified System data Op = OpList | OpDerive | OpVersion data Env = Env { envVerbose :: Bool, envOutput :: (Maybe String), envOperation :: Op, envNoline :: Bool, envArgs :: [(String,String)], envResultsOnly :: Bool, envGlobalRules :: [Tag], envIgnoreDirectives :: Bool } env = Env { envVerbose = False, envOutput = Nothing, envOperation = OpDerive, envNoline = False, envArgs = [], envResultsOnly = False, envIgnoreDirectives = False, envGlobalRules = [] } getOutput e = maybe (return stdout) (\fn -> openFile fn WriteMode) (envOutput e) options :: [OptDescr (Env -> Env)] options = [ Option ['v'] ["verbose"] (NoArg (\e->e{envVerbose = True})) "chatty output on stderr" , Option ['V'] ["version"] (NoArg (\e->e{envOperation = OpVersion})) "show version number" , Option ['l'] ["list"] (NoArg (\e->e{envOperation = OpList})) "list available derivations" , Option ['L'] ["noline"] (NoArg (\e->e{envNoline = True})) "omit line pragmas from output" , Option ['o'] ["output"] (ReqArg (\x e->e{envOutput = (Just x)}) "FILE") "output FILE" , Option ['s'] ["set"] (ReqArg setArg "name:value") "set argument to value" , Option ['r'] ["resultsonly"] (NoArg (\e->e{envResultsOnly = True})) "output only results, do not include source file" , Option ['g'] ["global"] (ReqArg addGlobalRule "rule") "addition rules to apply globally" , Option ['i'] ["ignore"] (NoArg (\e->e{envIgnoreDirectives = True})) "ignore directives in file. useful with -g" ] setArg x e = e {envArgs = (n, tail rest):(envArgs e)} where (n,rest) = span (/= ':') x addGlobalRule x e = e {envGlobalRules = x:(envGlobalRules e)} categorize :: Ord c => [(c,a)] -> [(c,[a])] categorize xs = map f $ groupBy fstEq $ sortBy fstOrd xs where f ys = (fst (head ys),snds ys) fstEq (a,_) (b,_) = a == b fstOrd (a,_) (b,_) = compare a b doList = do let rn = categorize [(c,(n,h)) | (n,_,c,h,_) <- Rules.rules] putStrLn $ unlines $ buildTableLL $ concat [ (c ++ ":","") : (map (\(a,b) -> (" " ++ a, b)) $ sort xs)| (c,xs)<- rn] header = "Usage: DrIFT [OPTION...] file" main = do argv <- System.getArgs (env,n) <- case (getOpt Permute options argv) of (as,n,[]) -> return (foldr ($) env as ,n) (_,_,errs) -> putErrDie (concat errs ++ usageInfo header options) case env of Env { envOperation = OpList } -> doList Env { envOperation = OpVersion} -> putStr ("Version " ++ fullName ++ "\n") _ -> case n of [n] -> derive env n _ -> putErrDie ("single input file must be specified.\n" ++ usageInfo header options) derive env fname = do let findent xs = f (lines xs) where f (x:xs) = let (w,n) = span isSpace x in case n of (c:_) | isAlpha c -> length w _ -> f xs f [] = 0 file <- readFile fname let (body,_) = userCode file b = ".lhs" `isSuffixOf` fname zz = fromLit b body ss = if b then replicate (findent zz) ' ' else "" handle <- getOutput env hPutStr handle $ ss ++ "{- Generated by " ++ package ++ " (Automatic class derivations for Haskell) -}\n" unless (envNoline env) $ hPutStr handle $ ss ++ "{-# LINE 1 \"" ++ fname ++ "\" #-}\n" let (docs,dats,todo) = process . addGlobals env . parser $ zz moreDocs <- fmap ((\(x,_,_) -> x) . 
process) (chaseImports body todo) let result = (\r -> codeSeperator ++ '\n':r) . render . vsep $ (docs ++ sepDoc:moreDocs) if (envResultsOnly env) then hPutStr handle result else do hPutStr handle zz hPutStr handle $ unlines . map (ss ++) . lines $ result hFlush handle addGlobals env tds = (envGlobalRules env,Directive):concatMap f tds where f x | not (envIgnoreDirectives env) = [x] f (_,Directive) = [] f (_,TypeName _) = [] f (_,d) = [([],d)] rules = map (\(a,b,_,_,_) -> (a,b)) Rules.rules -- codeRender doc = fullRender PageMode 80 1 doc "" -- now obsolete vsep = vcat . map ($$ (text "")) sepDoc = commentLine . text $ " Imported from other files :-" backup :: FilePath -> FilePath backup f = (reverse . dropWhile (/= '.') . reverse ) f ++ "bak" newfile :: FilePath -> FilePath newfile f = (reverse . dropWhile (/= '.') . reverse ) f ++ "DrIFT" -- Main Pass - Takes parsed data and rules and combines to create instances... -- Returns all parsed data, ande commands calling for files to be imported if -- datatypes aren't located in this module. process :: ToDo -> ([Doc],[Data],ToDo) process i = (concatMap g dats ++ concatMap h moreDats,parsedData,imports) where g (tags,d) = [(find t rules) d | t <- (tags ++ directives)] h (tags,d) = [(find t rules) d | t <- tags] directives = concatMap fst globals (dats,commands) = partition (isData . snd) i (globals,fors) = partition (\(_,d) -> d == Directive) commands (moreDats,imports) = resolve parsedData fors ([],[]) parsedData = map snd dats ++ preludeData find :: Tag -> [Rule] -> (Data -> Doc) find t r = case filter ((==t) . fst) $ r of [] -> const (commentLine warning) (x:xs) -> snd x where warning = hsep . texts $ ["Warning : Rule",t,"not found."] DrIFT-2.2.3/src/Unlit.hs0000644000076400007640000000642710753606147011617 00000000000000module Unlit(unlit) where -- Part of the following code is from -- "Report on the Programming Language Haskell", -- version 1.2, appendix C. import Char data Classified = Program String | Blank | Comment | Include Int String | Pre String classify :: [String] -> [Classified] classify [] = [] classify (('\\':x):xs) | x == "begin{code}" = Blank : allProg xs where allProg [] = [] -- Should give an error message, -- but I have no good position information. allProg (('\\':x):xs) | x == "end{code}" = Blank : classify xs allProg (x:xs) = Program x:allProg xs classify (('>':x):xs) = Program (' ':x) : classify xs classify (('#':x):xs) = (case words x of (line:file:_) | all isDigit line -> Include (read line) file _ -> Pre x ) : classify xs classify (x:xs) | all isSpace x = Blank:classify xs classify (x:xs) = Comment:classify xs unclassify :: Classified -> String unclassify (Program s) = s unclassify (Pre s) = '#':s unclassify (Include i f) = '#':' ':show i ++ ' ':f unclassify Blank = "" unclassify Comment = "" -- | Remove literate comments leaving normal haskell source. unlit :: String -- ^ Filename for error messages -> String -- ^ literate source -> String -- ^ deliterated source unlit file lhs = (unlines . map unclassify . adjecent file (0::Int) Blank . 
classify) (inlines lhs) adjecent :: String -> Int -> Classified -> [Classified] -> [Classified] adjecent file 0 _ (x :xs) = x : adjecent file 1 x xs -- force evaluation of line number adjecent file n y@(Program _) (x@Comment :xs) = error (message file n "program" "comment") adjecent file n y@(Program _) (x@(Include i f):xs) = x: adjecent f i y xs adjecent file n y@(Program _) (x@(Pre _) :xs) = x: adjecent file (n+1) y xs adjecent file n y@Comment (x@(Program _) :xs) = error (message file n "comment" "program") adjecent file n y@Comment (x@(Include i f):xs) = x: adjecent f i y xs adjecent file n y@Comment (x@(Pre _) :xs) = x: adjecent file (n+1) y xs adjecent file n y@Blank (x@(Include i f):xs) = x: adjecent f i y xs adjecent file n y@Blank (x@(Pre _) :xs) = x: adjecent file (n+1) y xs adjecent file n _ (x@next :xs) = x: adjecent file (n+1) x xs adjecent file n _ [] = [] message "\"\"" n p c = "Line "++show n++": "++p++ " line before "++c++" line.\n" message [] n p c = "Line "++show n++": "++p++ " line before "++c++" line.\n" message file n p c = "In file " ++ file ++ " at line "++show n++": "++p++ " line before "++c++" line.\n" -- Re-implementation of 'lines', for better efficiency (but decreased laziness). -- Also, importantly, accepts non-standard DOS and Mac line ending characters. inlines s = lines' s id where lines' [] acc = [acc []] lines' ('\^M':'\n':s) acc = acc [] : lines' s id -- DOS lines' ('\^M':s) acc = acc [] : lines' s id -- MacOS lines' ('\n':s) acc = acc [] : lines' s id -- Unix lines' (c:s) acc = lines' s (acc . (c:)) DrIFT-2.2.3/src/Version.hs0000644000076400007640000000017610753606424012142 00000000000000module Version(package, version, fullName) where package = "DrIFT" version = "2.2.3" fullName = package ++ "-" ++ version DrIFT-2.2.3/src/Makefile.in0000644000076400007640000002671310753606417012235 00000000000000# Makefile.in generated by automake 1.10 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : bin_PROGRAMS = DrIFT$(EXEEXT) subdir = src DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/Version.hs.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/ac-macros/acincludepackage.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_CLEAN_FILES = Version.hs am__installdirs = "$(DESTDIR)$(bindir)" binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) am__objects_1 = am_DrIFT_OBJECTS = $(am__objects_1) DrIFT_OBJECTS = $(am_DrIFT_OBJECTS) DrIFT_LDADD = $(LDADD) DEFAULT_INCLUDES = -I.@am__isrc@ depcomp = am__depfiles_maybe = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ SOURCES = $(DrIFT_SOURCES) DIST_SOURCES = $(DrIFT_SOURCES) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ HC = @HC@ HCFLAGS = @HCFLAGS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SH = @SH@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ am__leading_dot = @am__leading_dot@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = no-dependencies foreign subdir-objects RULES = Rules/Arbitrary.hs Rules/Binary.hs Rules/BitsBinary.hs Rules/FunctorM.hs \ Rules/Generic.hs Rules/GhcBinary.hs Rules/Monoid.hs Rules/Standard.hs \ Rules/Utility.hs Rules/Xml.hs DrIFT_SOURCES = ${RULES} 
ChaseImports.hs CommandP.hs DataP.lhs DrIFT.hs GetOpt.hs \ Unlit.hs ParseLib2.hs PreludData.hs Pretty.lhs RuleUtils.hs Version.hs GenUtil.hs # DrIFT_static_SOURCES = $(DrIFT_SOURCES) # DrIFT_static_LINK = $(DrIFT_LINK) -static -optl-static -ldl EXTRA_DIST = Rules.hs collect_rules.sh # EXTRA_PROGRAMS = DrIFT_static BUILT_SOURCES = Rules.hs DrIFT_LINK = $(HC) $(HCFLAGS) -o $@ SUFFIXES = .hs .lhs .o .hi #depend: # ghc -M -optdep-f -optdepdepend.make DrIFT.hs #depend.make: # ghc -M -optdep-f -optdepdepend.make DrIFT.hs BINDISTNAME = $(PACKAGE)-$(VERSION)-`uname -s`-`uname -m` CLEANFILES = Rules/*.o Rules/*.hi all: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) all-am .SUFFIXES: .SUFFIXES: .hs .lhs .o .hi $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign --ignore-deps src/Makefile'; \ cd $(top_srcdir) && \ $(AUTOMAKE) --foreign --ignore-deps src/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh Version.hs: $(top_builddir)/config.status $(srcdir)/Version.hs.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_PROGRAMS)'; for p in $$list; do \ p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ if test -f $$p \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; for p in $$list; do \ f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ rm -f "$(DESTDIR)$(bindir)/$$f"; \ done clean-binPROGRAMS: -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -pR 
$(srcdir)/$$file $(distdir)$$dir || exit 1; \ fi; \ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ else \ test -f $(distdir)/$$file \ || cp -p $$d/$$file $(distdir)/$$file \ || exit 1; \ fi; \ done check-am: all-am check: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) check-am all-am: Makefile $(PROGRAMS) installdirs: for dir in "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) clean-am: clean-binPROGRAMS clean-generic mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-local dvi: dvi-am dvi-am: html: html-am info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-exec-am: install-binPROGRAMS install-html: install-html-am install-info: install-info-am install-man: install-pdf: install-pdf-am install-ps: install-ps-am installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-binPROGRAMS \ clean-generic distclean distclean-compile distclean-generic \ distclean-local distdir dvi dvi-am html html-am info info-am \ install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am \ uninstall-binPROGRAMS Rules.hs: collect_rules.sh sh ./collect_rules.sh > $@ # COMPILE = $(HC) $(HCFLAGS) # all: DrIFT DrIFT: $(DrIFT_SOURCES) $(nodist_DrIFT_SOURCES) $(HC) $(HCFLAGS) -i. -i@srcdir@ -hidir . -odir . -o $@ --make @srcdir@/DrIFT.hs #.hs.o: # $(HC) -O -i. -i@srcdir@ $(HCFLAGS) -hidir . -o $@ -c $< #.lhs.o: # $(HC) -O -i. -i@srcdir@ $(HCFLAGS) -hidir . 
-o $@ -c $< #%.hi: %.o # @: clean: rm -f -- *.hi *.o *_hsc.c *_hsc.h distclean-local: rm -f -- *.hi *.o *_hsc.c *_hsc.h config.log config.status # $(PACKAGE)_static bin-dist: $(PACKAGE) $(RM) -rf -- $(BINDISTNAME) mkdir $(BINDISTNAME) strip -- $(PACKAGE) || true #strip -- $(PACKAGE)_static || true cp -- $(PACKAGE) $(BINDISTNAME) #cp -- $(PACKAGE)_static $(BINDISTNAME) tar cvf $(BINDISTNAME).tar $(BINDISTNAME) gzip -f -- $(BINDISTNAME).tar $(RM) -rf -- $(BINDISTNAME) #-include depend.make #.PHONY: depend # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: DrIFT-2.2.3/src/ParseLib2.hs0000644000076400007640000002241010753606147012275 00000000000000{----------------------------------------------------------------------------- A LIBRARY OF MONADIC PARSER COMBINATORS 29th July 1996 Revised, October 1996 Graham Hutton Erik Meijer University of Nottingham University of Utrecht This Haskell 1.4 script defines a library of parser combinators, and is taken from sections 1-6 of our article "Monadic Parser Combinators". Some changes to the library have been made in the move from Gofer to Haskell: * Do notation is used in place of monad comprehension notation; * The parser datatype is defined using "newtype", to avoid the overhead of tagging and untagging parsers with the P constructor. -----------------------------------------------------------------------------} -- Added to April 1997, for offside rule, {- -} comments, annotations, -- extra characters in identifiers .. - -- extra combinator parsers for skipping over input module ParseLib2 (Parser, item, papply, (+++), sat, many, many1, sepby, sepby1, chainl, chainl1, chainr, chainr1, ops, bracket, char, digit, lower, upper, letter, alphanum, string, ident, nat, int, spaces, comment, junk, parse, token, natural, integer, symbol, identifier, many1_offside,many_offside,off, opt, skipUntil, skipUntilOff,skipUntilParse,skipNest) where import Char import Monad infixr 5 +++ --- The parser monad --------------------------------------------------------- newtype Parser a = P (Pos -> Pstring -> [(a,Pstring)]) type Pstring = (Pos,String) type Pos = (Int,Int) instance Functor Parser where -- fmap :: (a -> b) -> (Parser a -> Parser b) fmap f (P p) = P (\pos inp -> [(f v, out) | (v,out) <- p pos inp]) instance Monad Parser where -- return :: a -> Parser a return v = P (\pos inp -> [(v,inp)]) -- >>= :: Parser a -> (a -> Parser b) -> Parser b (P p) >>= f = P (\pos inp -> concat [papply (f v) pos out | (v,out) <- p pos inp]) fail s = P (\pos inp -> []) instance MonadPlus Parser where -- mzero :: Parser a mzero = P (\pos inp -> []) -- mplus :: Parser a -> Parser a -> Parser a (P p) `mplus` (P q) = P (\pos inp -> (p pos inp ++ q pos inp)) -- bits which donn't fit into Haskell's type classes just yet :-( env :: Parser Pos env = P(\pos inp -> [(pos,inp)]) setenv :: Pos -> Parser a -> Parser a setenv s (P m) = P $ \_ -> m s update :: (Pstring -> Pstring) -> Parser Pstring update f = P( \pos s -> [(s,f s)]) set :: Pstring -> Parser Pstring set s = update (\_ -> s) fetch :: Parser Pstring fetch = update id --- Other primitive parser combinators --------------------------------------- item :: Parser Char item = do (pos,x:_) <- update newstate defpos <- env if onside pos defpos then return x else mzero force :: Parser a -> Parser a force (P p) = P (\pos inp -> let x = p pos inp in (fst (head x), snd (head x)) : tail x) first :: Parser a -> Parser a first (P p) = P (\pos inp -> case p pos 
inp of [] -> [] (x:xs) -> [x]) papply :: Parser a -> Pos -> Pstring -> [(a,Pstring)] papply (P p) pos inp = p pos inp -- layout handling functions onside :: Pos -> Pos -> Bool onside (l,c) (dl,dc) = (c > dc) || (l == dl) newstate :: Pstring -> Pstring newstate ((l,c),x:xs) = ((l',c'),xs) where (l',c') = case x of '\n' -> (l+1,0) '\t' -> (l,((c `div` 8) +1)*8) _ -> (l,c+1) --- Derived combinators ------------------------------------------------------ (+++) :: Parser a -> Parser a -> Parser a p +++ q = first (p `mplus` q) sat :: (Char -> Bool) -> Parser Char sat p = do {x <- item; if p x then return x else mzero} many :: Parser a -> Parser [a] --many p = force (many1 p +++ return []) many p = (many1 p +++ return []) many1 :: Parser a -> Parser [a] many1 p = do {x <- p; xs <- many p; return (x:xs)} sepby :: Parser a -> Parser b -> Parser [a] p `sepby` sep = (p `sepby1` sep) +++ return [] sepby1 :: Parser a -> Parser b -> Parser [a] p `sepby1` sep = do {x <- p; xs <- many (do {sep; p}); return (x:xs)} chainl :: Parser a -> Parser (a -> a -> a) -> a -> Parser a chainl p op v = (p `chainl1` op) +++ return v chainl1 :: Parser a -> Parser (a -> a -> a) -> Parser a p `chainl1` op = do {x <- p; rest x} where rest x = do {f <- op; y <- p; rest (f x y)} +++ return x chainr :: Parser a -> Parser (a -> a -> a) -> a -> Parser a chainr p op v = (p `chainr1` op) +++ return v chainr1 :: Parser a -> Parser (a -> a -> a) -> Parser a p `chainr1` op = do {x <- p; rest x} where rest x = do {f <- op; y <- p `chainr1` op; return (f x y)} +++ return x ops :: [(Parser a, b)] -> Parser b ops xs = foldr1 (+++) [do {p; return op} | (p,op) <- xs] bracket :: Parser a -> Parser b -> Parser c -> Parser b bracket open p close = do {open; x <- p; close; return x} --- Useful parsers ----------------------------------------------------------- char :: Char -> Parser Char char x = sat (\y -> x == y) digit :: Parser Char digit = sat isDigit lower :: Parser Char lower = sat isLower upper :: Parser Char upper = sat isUpper letter :: Parser Char letter = sat isAlpha alphanum :: Parser Char alphanum = sat (\x -> isAlphaNum x || x `elem` ['\'','_','.','#']) string :: String -> Parser String string "" = return "" string (x:xs) = do {char x; string xs; return (x:xs)} -- parse a Haskell 98 identifier, when the input is a valid Haskell 98 identifier (it's more liberal than H98) ident :: Parser String ident = do {x <- lower +++ char '_' ; xs <- many alphanum; return (x:xs)} nat :: Parser Int nat = do {x <- digit; return (digitToInt x)} `chainl1` return op where m `op` n = 10*m + n int :: Parser Int int = do {char '-'; n <- nat; return (-n)} +++ nat --- Lexical combinators ------------------------------------------------------ spaces :: Parser () spaces = do {many1 (sat isJunk); return ()} isJunk x = isSpace x || (not . 
isPrint) x || isControl x comment :: Parser () comment = onelinecomment `mplus` bracecomment onelinecomment :: Parser () onelinecomment = do {string "--"; many (sat (\x -> x /= '\n')); return ()} bracecomment :: Parser () bracecomment = skipNest (do{string "{-"; sat (`notElem` ['!','@','*'])}) (do{sat (`notElem` ['!','@','*']);string "-}"}) junk :: Parser () junk = do _ <- setenv (0,-1) (many (spaces +++ comment)) return () parse :: Parser a -> Parser a parse p = do {junk; p} token :: Parser a -> Parser a token p = do {v <- p; junk; return v} --- Token parsers ------------------------------------------------------------ natural :: Parser Int natural = token nat integer :: Parser Int integer = token int symbol :: String -> Parser String symbol xs = token (string xs) identifier :: [String] -> Parser String identifier ks = token (do {x <- ident; if not (elem x ks) then return x else mzero}) --- Offside Parsers --------------------------------------------------------- many1_offside :: Parser a -> Parser [a] many1_offside p = do (pos,_) <- fetch vs <- setenv pos (many1 (off p)) return vs many_offside :: Parser a -> Parser [a] many_offside p = many1_offside p +++ mzero off :: Parser a -> Parser a off p = do (dl,dc) <- env ((l,c),_) <- fetch if c == dc then setenv (l,dc) p else mzero ------------------------------------------------------------------------------ -- Noel's own favourite parsers skipUntil :: Parser a -> Parser a skipUntil p = p +++ do token (many1 (sat (not . isSpace))) skipUntil p skipNest :: Parser a -> Parser b -> Parser () skipNest start finish = let x = do{ finish;return()} +++ do{skipNest start finish;x} +++ do{item;x} in do{start; x} -- this are messy, but make writing incomplete parsers a whole lot -- easier. skipUntilOff :: Parser a -> Parser [a] skipUntilOff p = fmap (concatMap justs) . many_offside $ fmap Just p +++ fmap (const Nothing) (many1 (token (many1 item))) skipUntilParse :: Char -> Parser a -> Parser [a] skipUntilParse u p = fmap (concatMap justs) . many $ do r<- p token (char u) return (Just r) +++ do many . token . many1 . sat $(/= u) token (char u) return Nothing justs (Just a) = [a] justs Nothing = [] opt p = p +++ return [] DrIFT-2.2.3/src/GetOpt.hs0000644000076400007640000002346210753606147011724 00000000000000----------------------------------------------------------------------------------------- -- A Haskell port of GNU's getopt library -- -- Sven Panne Oct. 1996; last change: Jul. 1998 -- -- Two rather obscure features are missing: The Bash 2.0 non-option hack (if you don't -- already know it, you probably don't want to hear about it...) and the recognition of -- long options with a single dash (e.g. '-help' is recognised as '--help', as long as -- there is no short option 'h'). -- -- Other differences between GNU's getopt and this implementation: -- * To enforce a coherent description of options and arguments, there are explanation -- fields in the option/argument descriptor. -- * Error messages are now more informative, but no longer POSIX compliant... :-( -- -- And a final Haskell advertisement: The GNU C implementation uses well over 1100 lines, -- we need only 199 here, including a 46 line example! 
:-) ----------------------------------------------------------------------------------------- module GetOpt ( ArgOrder(..), OptDescr(..), ArgDescr(..), usageInfo, getOpt ) where import List(isPrefixOf) data ArgOrder a -- what to do with options following non-options: = RequireOrder -- no option processing after first non-option | Permute -- freely intersperse options and non-options | ReturnInOrder (String -> a) -- wrap non-options into options data OptDescr a = -- description of a single options: Option [Char] -- list of short option characters [String] -- list of long option strings (without "--") (ArgDescr a) -- argument descriptor String -- explanation of option for user data ArgDescr a -- description of an argument option: = NoArg a -- no argument expected | ReqArg (String -> a) String -- option requires argument | OptArg (Maybe String -> a) String -- optional argument data OptKind a -- kind of cmd line arg (internal use only): = Opt a -- an option | NonOpt String -- a non-option | EndOfOpts -- end-of-options marker (i.e. "--") | OptErr String -- something went wrong... usageInfo :: String -- header -> [OptDescr a] -- option descriptors -> String -- nicely formatted decription of options usageInfo header optDescr = unlines (header:table) where (ss,ls,ds) = (unzip3 . map fmtOpt) optDescr table = zipWith3 paste (sameLen ss) (sameLen ls) (sameLen ds) paste x y z = " " ++ x ++ " " ++ y ++ " " ++ z sameLen xs = flushLeft ((maximum . map length) xs) xs flushLeft n xs = [ take n (x ++ repeat ' ') | x <- xs ] fmtOpt :: OptDescr a -> (String,String,String) fmtOpt (Option sos los ad descr) = (sepBy ", " (map (fmtShort ad) sos), sepBy ", " (map (fmtLong ad) los), descr) where sepBy _ [] = "" sepBy _ [x] = x sepBy sep (x:xs) = x ++ sep ++ sepBy sep xs fmtShort :: ArgDescr a -> Char -> String fmtShort (NoArg _ ) so = "-" ++ [so] fmtShort (ReqArg _ ad) so = "-" ++ [so] ++ " " ++ ad fmtShort (OptArg _ ad) so = "-" ++ [so] ++ "[" ++ ad ++ "]" fmtLong :: ArgDescr a -> String -> String fmtLong (NoArg _ ) lo = "--" ++ lo fmtLong (ReqArg _ ad) lo = "--" ++ lo ++ "=" ++ ad fmtLong (OptArg _ ad) lo = "--" ++ lo ++ "[=" ++ ad ++ "]" getOpt :: ArgOrder a -- non-option handling -> [OptDescr a] -- option descriptors -> [String] -- the commandline arguments -> ([a],[String],[String]) -- (options,non-options,error messages) getOpt _ _ [] = ([],[],[]) getOpt ordering optDescr args = procNextOpt opt ordering where procNextOpt (Opt o) _ = (o:os,xs,es) procNextOpt (NonOpt x) RequireOrder = ([],x:rest,[]) procNextOpt (NonOpt x) Permute = (os,x:xs,es) procNextOpt (NonOpt x) (ReturnInOrder f) = (f x :os, xs,es) procNextOpt EndOfOpts RequireOrder = ([],rest,[]) procNextOpt EndOfOpts Permute = ([],rest,[]) procNextOpt EndOfOpts (ReturnInOrder f) = (map f rest,[],[]) procNextOpt (OptErr e) _ = (os,xs,e:es) (opt,rest) = getNext args optDescr (os,xs,es) = getOpt ordering optDescr rest -- take a look at the next cmd line arg and decide what to do with it getNext :: [String] -> [OptDescr a] -> (OptKind a,[String]) getNext (('-':'-':[]):rest) _ = (EndOfOpts,rest) getNext (('-':'-':xs):rest) optDescr = longOpt xs rest optDescr getNext (('-':x:xs) :rest) optDescr = shortOpt x xs rest optDescr getNext (a :rest) _ = (NonOpt a,rest) getNext [] _ = error "getNext: impossible" -- handle long option longOpt :: String -> [String] -> [OptDescr a] -> (OptKind a,[String]) longOpt xs rest optDescr = long ads arg rest where (opt,arg) = break (=='=') xs options = [ o | o@(Option _ ls _ _) <- optDescr, l <- ls, opt `isPrefixOf` l ] ads = [ ad 
| Option _ _ ad _ <- options ] optStr = ("--"++opt) long (_:_:_) _ rest1 = (errAmbig options optStr,rest1) long [NoArg a ] [] rest1 = (Opt a,rest1) long [NoArg _ ] ('=':_) rest1 = (errNoArg optStr,rest1) long [ReqArg _ d] [] [] = (errReq d optStr,[]) long [ReqArg f _] [] (r:rest1) = (Opt (f r),rest1) long [ReqArg f _] ('=':ys) rest1 = (Opt (f ys),rest1) long [OptArg f _] [] rest1 = (Opt (f Nothing),rest1) long [OptArg f _] ('=':ys) rest1 = (Opt (f (Just ys)),rest1) long [_] (_ :_) _ = error "long: impossible" long [] _ rest1 = (errUnrec optStr,rest1) -- handle short option shortOpt :: Char -> String -> [String] -> [OptDescr a] -> (OptKind a,[String]) shortOpt x xs rest optDescr = short ads xs rest where options = [ o | o@(Option ss _ _ _) <- optDescr, s <- ss, x == s ] ads = [ ad | Option _ _ ad _ <- options ] optStr = '-':[x] short (_:_:_) _ rest1 = (errAmbig options optStr,rest1) short (NoArg a :_) [] rest1 = (Opt a,rest1) short (NoArg a :_) ys rest1 = (Opt a,('-':ys):rest1) short (ReqArg _ d:_) [] [] = (errReq d optStr,[]) short (ReqArg f _:_) [] (r:rest1) = (Opt (f r),rest1) short (ReqArg f _:_) ys rest1 = (Opt (f ys),rest1) short (OptArg f _:_) [] rest1 = (Opt (f Nothing),rest1) short (OptArg f _:_) ys rest1 = (Opt (f (Just ys)),rest1) short [] [] rest1 = (errUnrec optStr,rest1) short [] ys rest1 = (errUnrec optStr,('-':ys):rest1) -- miscellaneous error formatting errAmbig :: [OptDescr a] -> String -> OptKind a errAmbig ods optStr = OptErr (usageInfo header ods) where header = "option `" ++ optStr ++ "' is ambiguous; could be one of:" errReq :: String -> String -> OptKind a errReq d optStr = OptErr ("option `" ++ optStr ++ "' requires an argument " ++ d ++ "\n") errUnrec :: String -> OptKind a errUnrec optStr = OptErr ("unrecognized option `" ++ optStr ++ "'\n") errNoArg :: String -> OptKind a errNoArg optStr = OptErr ("option `" ++ optStr ++ "' doesn't allow an argument\n") {- ----------------------------------------------------------------------------------------- -- and here a small and hopefully enlightening example: data Flag = Verbose | Version | Name String | Output String | Arg String deriving Show options :: [OptDescr Flag] options = [Option ['v'] ["verbose"] (NoArg Verbose) "verbosely list files", Option ['V','?'] ["version","release"] (NoArg Version) "show version info", Option ['o'] ["output"] (OptArg out "FILE") "use FILE for dump", Option ['n'] ["name"] (ReqArg Name "USER") "only dump USER's files"] out :: Maybe String -> Flag out Nothing = Output "stdout" out (Just o) = Output o test :: ArgOrder Flag -> [String] -> String test order cmdline = case getOpt order options cmdline of (o,n,[] ) -> "options=" ++ show o ++ " args=" ++ show n ++ "\n" (_,_,errs) -> concat errs ++ usageInfo header options where header = "Usage: foobar [OPTION...] files..." -- example runs: -- putStr (test RequireOrder ["foo","-v"]) -- ==> options=[] args=["foo", "-v"] -- putStr (test Permute ["foo","-v"]) -- ==> options=[Verbose] args=["foo"] -- putStr (test (ReturnInOrder Arg) ["foo","-v"]) -- ==> options=[Arg "foo", Verbose] args=[] -- putStr (test Permute ["foo","--","-v"]) -- ==> options=[] args=["foo", "-v"] -- putStr (test Permute ["-?o","--name","bar","--na=baz"]) -- ==> options=[Version, Output "stdout", Name "bar", Name "baz"] args=[] -- putStr (test Permute ["--ver","foo"]) -- ==> option `--ver' is ambiguous; could be one of: -- -v --verbose verbosely list files -- -V, -? --version, --release show version info -- Usage: foobar [OPTION...] files... 
-- -v --verbose verbosely list files -- -V, -? --version, --release show version info -- -o[FILE] --output[=FILE] use FILE for dump -- -n USER --name=USER only dump USER's files ----------------------------------------------------------------------------------------- -} DrIFT-2.2.3/src/CommandP.hs0000644000076400007640000000261510753606147012215 00000000000000-- parser for derive commands module CommandP (local,command,header) where import ParseLib2 import DataP -- command syntax {-!global : rule1, rule2, rule !-} {-! derive : rule1, rule2, !-} {-! for ty derive : rule , rule 2, .. !-} command = annotation global +++ annotation forType local = annotation loc global = do symbol "global" symbol ":" ts <- tag `sepby` symbol "," return (ts,Directive) forType = do symbol "for" ty <- cap symbol "derive" symbol ":" ts <- tag `sepby` symbol "," return (ts,TypeName ty) loc = do symbol "derive" symbol ":" tag `sepby` symbol "," cap = token (do x <- upper xs <- many alphanum return (x:xs)) icap = token $ do x <- upper xs <- many alphanum let f '.' = '/' f c = c return (x:map f xs) tag = token (many alphanum) annotation x = do symbol "{-!" r <- x symbol "!-}" return r -- parser for module headers header = do symbol "module" cap opt (do skipNest (symbol "(") (symbol ")") return []) symbol "where" many imports imports = do symbol "import" opt (symbol "qualified") i <- icap opt (symbol "as" >> cap) opt (symbol "hiding") opt (do skipNest (symbol "(") (symbol ")") return []) return i DrIFT-2.2.3/src/Pretty.lhs0000644000076400007640000007562010753606147012170 00000000000000********************************************************************************* * * * John Hughes's and Simon Peyton Jones's Pretty Printer Combinators * * * * based on "The Design of a Pretty-printing Library" * * in Advanced Functional Programming, * * Johan Jeuring and Erik Meijer (eds), LNCS 925 * * http://www.cs.chalmers.se/~rjmh/Papers/pretty.ps * * * * Heavily modified by Simon Peyton Jones, Dec 96 * * * ********************************************************************************* Version 3.0 28 May 1997 * Cured massive performance bug. If you write foldl <> empty (map (text.show) [1..10000]) you get quadratic behaviour with V2.0. Why? For just the same reason as you get quadratic behaviour with left-associated (++) chains. This is really bad news. One thing a pretty-printer abstraction should certainly guarantee is insensivity to associativity. It matters: suddenly GHC's compilation times went up by a factor of 100 when I switched to the new pretty printer. I fixed it with a bit of a hack (because I wanted to get GHC back on the road). I added two new constructors to the Doc type, Above and Beside: <> = Beside $$ = Above Then, where I need to get to a "TextBeside" or "NilAbove" form I "force" the Doc to squeeze out these suspended calls to Beside and Above; but in so doing I re-associate. It's quite simple, but I'm not satisfied that I've done the best possible job. I'll send you the code if you are interested. * Added new exports: punctuate, hang int, integer, float, double, rational, lparen, rparen, lbrack, rbrack, lbrace, rbrace, * fullRender's type signature has changed. 
Rather than producing a string it now takes an extra couple of arguments that tells it how to glue fragments of output together: fullRender :: Mode -> Int -- Line length -> Float -- Ribbons per line -> (TextDetails -> a -> a) -- What to do with text -> a -- What to do at the end -> Doc -> a -- Result The "fragments" are encapsulated in the TextDetails data type: data TextDetails = Chr Char | Str String | PStr FAST_STRING The Chr and Str constructors are obvious enough. The PStr constructor has a packed string (FAST_STRING) inside it. It's generated by using the new "ptext" export. An advantage of this new setup is that you can get the renderer to do output directly (by passing in a function of type (TextDetails -> IO () -> IO ()), rather than producing a string that you then print. Version 2.0 24 April 1997 * Made empty into a left unit for <> as well as a right unit; it is also now true that nest k empty = empty which wasn't true before. * Fixed an obscure bug in sep that occassionally gave very wierd behaviour * Added $+$ * Corrected and tidied up the laws and invariants ====================================================================== Relative to John's original paper, there are the following new features: 1. There's an empty document, "empty". It's a left and right unit for both <> and $$, and anywhere in the argument list for sep, hcat, hsep, vcat, fcat etc. It is Really Useful in practice. 2. There is a paragraph-fill combinator, fsep, that's much like sep, only it keeps fitting things on one line until itc can't fit any more. 3. Some random useful extra combinators are provided. <+> puts its arguments beside each other with a space between them, unless either argument is empty in which case it returns the other hcat is a list version of <> hsep is a list version of <+> vcat is a list version of $$ sep (separate) is either like hsep or like vcat, depending on what fits cat is behaves like sep, but it uses <> for horizontal conposition fcat is behaves like fsep, but it uses <> for horizontal conposition These new ones do the obvious things: char, semi, comma, colon, space, parens, brackets, braces, quotes, doubleQuotes 4. The "above" combinator, $$, now overlaps its two arguments if the last line of the top argument stops before the first line of the second begins. For example: text "hi" $$ nest 5 "there" lays out as hi there rather than hi there There are two places this is really useful a) When making labelled blocks, like this: Left -> code for left Right -> code for right LongLongLongLabel -> code for longlonglonglabel The block is on the same line as the label if the label is short, but on the next line otherwise. b) When laying out lists like this: [ first , second , third ] which some people like. But if the list fits on one line you want [first, second, third]. You can't do this with John's original combinators, but it's quite easy with the new $$. The combinator $+$ gives the original "never-overlap" behaviour. 5. Several different renderers are provided: * a standard one * one that uses cut-marks to avoid deeply-nested documents simply piling up in the right-hand margin * one that ignores indentation (fewer chars output; good for machines) * one that ignores indentation and newlines (ditto, only more so) 6. 
Numerous implementation tidy-ups Use of unboxed data types to speed up the implementation \begin{code} module Pretty ( Doc, -- Abstract Mode(..), TextDetails(..), empty, isEmpty, nest, text, char, ptext, tshow, int, integer, float, double, -- rational, parens, brackets, braces, quotes, doubleQuotes, semi, comma, colon, space, equals, lparen, rparen, lbrack, rbrack, lbrace, rbrace, (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat, hang, punctuate, -- renderStyle, -- Haskell 1.3 only render, fullRender ) where -- Don't import Util( assertPanic ) because it makes a loop in the module structure infixl 6 <> infixl 6 <+> infixl 5 $$, $+$ \end{code} ********************************************************* * * \subsection{CPP magic so that we can compile with both GHC and Hugs} * * ********************************************************* The library uses unboxed types to get a bit more speed, but these CPP macros allow you to use either GHC or Hugs. To get GHC, just set the CPP variable __GLASGOW_HASKELL__ ********************************************************* * * \subsection{The interface} * * ********************************************************* The primitive @Doc@ values \begin{code} empty :: Doc isEmpty :: Doc -> Bool text :: String -> Doc char :: Char -> Doc semi, comma, colon, space, equals :: Doc lparen, rparen, lbrack, rbrack, lbrace, rbrace :: Doc parens, brackets, braces :: Doc -> Doc quotes, doubleQuotes :: Doc -> Doc int :: Int -> Doc integer :: Integer -> Doc float :: Float -> Doc double :: Double -> Doc --rational :: Rational -> Doc \end{code} Combining @Doc@ values \begin{code} (<>) :: Doc -> Doc -> Doc -- Beside hcat :: [Doc] -> Doc -- List version of <> (<+>) :: Doc -> Doc -> Doc -- Beside, separated by space hsep :: [Doc] -> Doc -- List version of <+> ($$) :: Doc -> Doc -> Doc -- Above; if there is no -- overlap it "dovetails" the two vcat :: [Doc] -> Doc -- List version of $$ cat :: [Doc] -> Doc -- Either hcat or vcat sep :: [Doc] -> Doc -- Either hsep or vcat fcat :: [Doc] -> Doc -- ``Paragraph fill'' version of cat fsep :: [Doc] -> Doc -- ``Paragraph fill'' version of sep nest :: Int -> Doc -> Doc -- Nested \end{code} GHC-specific ones. \begin{code} hang :: Doc -> Int -> Doc -> Doc punctuate :: Doc -> [Doc] -> [Doc] -- punctuate p [d1, ... dn] = [d1 <> p, d2 <> p, ... dn-1 <> p, dn] \end{code} Displaying @Doc@ values. \begin{code} instance Show Doc where showsPrec prec doc cont = showDoc doc cont render :: Doc -> String -- Uses default style fullRender :: Mode -> Int -- Line length -> Float -- Ribbons per line -> (TextDetails -> a -> a) -- What to do with text -> a -- What to do at the end -> Doc -> a -- Result {- When we start using 1.3 renderStyle :: Style -> Doc -> String data Style = Style { lineLength :: Int, -- In chars ribbonsPerLine :: Float, -- Ratio of ribbon length to line length mode :: Mode } style :: Style -- The default style style = Style { lineLength = 100, ribbonsPerLine = 2.5, mode = PageMode } -} data Mode = PageMode -- Normal | ZigZagMode -- With zig-zag cuts | LeftMode -- No indentation, infinitely long lines | OneLineMode -- All on one line \end{code} ********************************************************* * * \subsection{The @Doc@ calculus} * * ********************************************************* The @Doc@ combinators satisfy the following laws: \begin{verbatim} Laws for $$ ~~~~~~~~~~~ (x $$ y) $$ z = x $$ (y $$ z) empty $$ x = x x $$ empty = x ...ditto $+$... 
Laws for <>
~~~~~~~~~~~
   (x <> y) <> z   = x <> (y <> z)
   empty <> x      = x
   x <> empty      = x
   ...ditto <+>...

Laws for text
~~~~~~~~~~~~~
   text s <> text t        = text (s++t)
   text "" <> x            = x, if x non-empty

Laws for nest
~~~~~~~~~~~~~
   nest 0 x                = x
   nest k (nest k' x)      = nest (k+k') x
   nest k (x <> y)         = nest k x <> nest k y
   nest k (x $$ y)         = nest k x $$ nest k y
   nest k empty            = empty
   x <> nest k y           = x <> y, if x non-empty

   ** Note the side condition on the last law!  It is this that
   ** makes it OK for empty to be a left unit for <>.

Miscellaneous
~~~~~~~~~~~~~
   (text s <> x) $$ y = text s <> ((text "" <> x) $$
                                   nest (-length s) y)

   (x $$ y) <> z = x $$ (y <> z)
        if y non-empty

Laws for list versions
~~~~~~~~~~~~~~~~~~~~~~
   sep (ps++[empty]++qs)   = sep (ps ++ qs)
        ...ditto hsep, hcat, vcat, fill...

   nest k (sep ps) = sep (map (nest k) ps)
        ...ditto hsep, hcat, vcat, fill...

Laws for oneLiner
~~~~~~~~~~~~~~~~~
   oneLiner (nest k p) = nest k (oneLiner p)
   oneLiner (x <> y)   = oneLiner x <> oneLiner y
\end{verbatim}

You might think that the following version of the first Miscellaneous law
would be neater:
\begin{verbatim}
<3 NO>  (text s <> x) $$ y = text s <> ((empty <> x) $$
                                        nest (-length s) y)
\end{verbatim}
But it doesn't work, for if x=empty, we would have
\begin{verbatim}
   text s $$ y = text s <> (empty $$ nest (-length s) y)
               = text s <> nest (-length s) y
\end{verbatim}

*********************************************************
*                                                       *
\subsection{Simple derived definitions}
*                                                       *
*********************************************************

\begin{code}
semi   = char ';'
colon  = char ':'
comma  = char ','
space  = char ' '
equals = char '='
lparen = char '('
rparen = char ')'
lbrack = char '['
rbrack = char ']'
lbrace = char '{'
rbrace = char '}'

tshow :: Show a => a -> Doc
tshow n = text (show n)

int     n = text (show n)
integer n = text (show n)
float   n = text (show n)
double  n = text (show n)
-- rational n = text (show n)
-- SIGBJORN wrote instead:
-- rational n = text (show (fromRationalX n))

quotes p       = char '`' <> p <> char '\''
doubleQuotes p = char '"' <> p <> char '"'
parens p       = char '(' <> p <> char ')'
brackets p     = char '[' <> p <> char ']'
braces p       = char '{' <> p <> char '}'

hcat = foldr (<>)  empty
hsep = foldr (<+>) empty
vcat = foldr ($$)  empty

hang d1 n d2 = sep [d1, nest n d2]

punctuate p []     = []
punctuate p (d:ds) = go d ds
                   where
                     go d []     = [d]
                     go d (e:es) = (d <> p) : go e es
\end{code}

*********************************************************
*                                                       *
\subsection{The @Doc@ data type}
*                                                       *
*********************************************************

A @Doc@ represents a {\em set} of layouts. A @Doc@ with
no occurrences of @Union@ or @NoDoc@ represents just one layout.
\begin{code}
data Doc = Empty                            -- empty
         | NilAbove Doc                     -- text "" $$ x
         | TextBeside TextDetails Int Doc   -- text s <> x
         | Nest Int Doc                     -- nest k x
         | Union Doc Doc                    -- ul `union` ur
         | NoDoc                            -- The empty set of documents
         | Beside Doc Bool Doc              -- True <=> space between
         | Above Doc Bool Doc               -- True <=> never overlap

type RDoc = Doc -- RDoc is a "reduced Doc", guaranteed not to have a top-level Above or Beside

reduceDoc :: Doc -> RDoc
reduceDoc (Beside p g q) = beside p g (reduceDoc q)
reduceDoc (Above p g q)  = above  p g (reduceDoc q)
reduceDoc p              = p

data TextDetails = Chr  Char
                 | Str  String
                 | PStr String

space_text = Chr ' '
nl_text    = Chr '\n'
\end{code}

Here are the invariants:
\begin{itemize}
\item
The argument of @NilAbove@ is never @Empty@. Therefore
a @NilAbove@ occupies at least two lines.

\item
The argument of @TextBeside@ is never @Nest@.
\item The layouts of the two arguments of @Union@ both flatten to the same string. \item The arguments of @Union@ are either @TextBeside@, or @NilAbove@. \item The right argument of a union cannot be equivalent to the empty set (@NoDoc@). If the left argument of a union is equivalent to the empty set (@NoDoc@), then the @NoDoc@ appears in the first line. \item An empty document is always represented by @Empty@. It can't be hidden inside a @Nest@, or a @Union@ of two @Empty@s. \item The first line of every layout in the left argument of @Union@ is longer than the first line of any layout in the right argument. (1) ensures that the left argument has a first line. In view of (3), this invariant means that the right argument must have at least two lines. \end{itemize} \begin{code} -- Arg of a NilAbove is always an RDoc nilAbove_ p = NilAbove p -- Arg of a TextBeside is always an RDoc textBeside_ s sl p = TextBeside s sl p -- Arg of Nest is always an RDoc nest_ k p = Nest k p -- Args of union are always RDocs union_ p q = Union p q \end{code} Notice the difference between * NoDoc (no documents) * Empty (one empty document; no height and no width) * text "" (a document containing the empty string; one line high, but has no width) ********************************************************* * * \subsection{@empty@, @text@, @nest@, @union@} * * ********************************************************* \begin{code} empty = Empty isEmpty Empty = True isEmpty _ = False char c = textBeside_ (Chr c) 1 Empty text s = case length s of {sl -> textBeside_ (Str s) sl Empty} ptext s = case length s of {sl -> textBeside_ (PStr s) sl Empty} nest k p = mkNest k (reduceDoc p) -- Externally callable version -- mkNest checks for Nest's invariant that it doesn't have an Empty inside it mkNest k (Nest k1 p) = mkNest (k + k1) p mkNest k NoDoc = NoDoc mkNest k Empty = Empty mkNest 0 p = p -- Worth a try! mkNest k p = nest_ k p -- mkUnion checks for an empty document mkUnion Empty q = Empty mkUnion p q = p `union_` q \end{code} ********************************************************* * * \subsection{Vertical composition @$$@} * * ********************************************************* \begin{code} p $$ q = Above p False q p $+$ q = Above p True q above :: Doc -> Bool -> RDoc -> RDoc above (Above p g1 q1) g2 q2 = above p g1 (above q1 g2 q2) above p@(Beside _ _ _) g q = aboveNest (reduceDoc p) g 0 (reduceDoc q) above p g q = aboveNest p g 0 (reduceDoc q) aboveNest :: RDoc -> Bool -> Int -> RDoc -> RDoc -- Specfication: aboveNest p g k q = p $g$ (nest k q) aboveNest NoDoc g k q = NoDoc aboveNest (p1 `Union` p2) g k q = aboveNest p1 g k q `union_` aboveNest p2 g k q aboveNest Empty g k q = mkNest k q aboveNest (Nest k1 p) g k q = nest_ k1 (aboveNest p g (k - k1) q) -- p can't be Empty, so no need for mkNest aboveNest (NilAbove p) g k q = nilAbove_ (aboveNest p g k q) aboveNest (TextBeside s sl p) g k q = textBeside_ s sl rest where k1 = k - sl rest = case p of Empty -> nilAboveNest g k1 q other -> aboveNest p g k1 q \end{code} \begin{code} nilAboveNest :: Bool -> Int -> RDoc -> RDoc -- Specification: text s <> nilaboveNest g k q -- = text s <> (text "" $g$ nest k q) nilAboveNest g k Empty = Empty -- Here's why the "text s <>" is in the spec! 
nilAboveNest g k (Nest k1 q) = nilAboveNest g (k + k1) q nilAboveNest g k q | (not g) && (k > 0) -- No newline if no overlap = textBeside_ (Str (spaces k)) k q | otherwise -- Put them really above = nilAbove_ (mkNest k q) \end{code} ********************************************************* * * \subsection{Horizontal composition @<>@} * * ********************************************************* \begin{code} p <> q = Beside p False q p <+> q = Beside p True q beside :: Doc -> Bool -> RDoc -> RDoc -- Specification: beside g p q = p q beside NoDoc g q = NoDoc beside (p1 `Union` p2) g q = (beside p1 g q) `union_` (beside p2 g q) beside Empty g q = q beside (Nest k p) g q = nest_ k (beside p g q) -- p non-empty beside p@(Beside p1 g1 q1) g2 q2 {- (A `op1` B) `op2` C == A `op1` (B `op2` C) iff op1 == op2 [ && (op1 == <> || op1 == <+>) ] -} | g1 == g2 = beside p1 g1 (beside q1 g2 q2) | otherwise = beside (reduceDoc p) g2 q2 beside p@(Above _ _ _) g q = beside (reduceDoc p) g q beside (NilAbove p) g q = nilAbove_ (beside p g q) beside (TextBeside s sl p) g q = textBeside_ s sl rest where rest = case p of Empty -> nilBeside g q other -> beside p g q \end{code} \begin{code} nilBeside :: Bool -> RDoc -> RDoc -- Specification: text "" <> nilBeside g p -- = text "" p nilBeside g Empty = Empty -- Hence the text "" in the spec nilBeside g (Nest _ p) = nilBeside g p nilBeside g p | g = textBeside_ space_text 1 p | otherwise = p \end{code} ********************************************************* * * \subsection{Separate, @sep@, Hughes version} * * ********************************************************* \begin{code} -- Specification: sep ps = oneLiner (hsep ps) -- `union` -- vcat ps sep = sepX True -- Separate with spaces cat = sepX False -- Don't sepX x [] = empty sepX x (p:ps) = sep1 x (reduceDoc p) 0 ps -- Specification: sep1 g k ys = sep (x : map (nest k) ys) -- = oneLiner (x nest k (hsep ys)) -- `union` x $$ nest k (vcat ys) sep1 :: Bool -> RDoc -> Int -> [Doc] -> RDoc sep1 g NoDoc k ys = NoDoc sep1 g (p `Union` q) k ys = sep1 g p k ys `union_` (aboveNest q False k (reduceDoc (vcat ys))) sep1 g Empty k ys = mkNest k (sepX g ys) sep1 g (Nest n p) k ys = nest_ n (sep1 g p (k - n) ys) sep1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (reduceDoc (vcat ys))) sep1 g (TextBeside s sl p) k ys = textBeside_ s sl (sepNB g p (k - sl) ys) -- Specification: sepNB p k ys = sep1 (text "" <> p) k ys -- Called when we have already found some text in the first item -- We have to eat up nests sepNB g (Nest _ p) k ys = sepNB g p k ys sepNB g Empty k ys = oneLiner (nilBeside g (reduceDoc rest)) `mkUnion` nilAboveNest False k (reduceDoc (vcat ys)) where rest | g = hsep ys | otherwise = hcat ys sepNB g p k ys = sep1 g p k ys \end{code} ********************************************************* * * \subsection{@fill@} * * ********************************************************* \begin{code} fsep = fill True fcat = fill False -- Specification: -- fill [] = empty -- fill [p] = p -- fill (p1:p2:ps) = oneLiner p1 <#> nest (length p1) -- (fill (oneLiner p2 : ps)) -- `union` -- p1 $$ fill ps fill g [] = empty fill g (p:ps) = fill1 g (reduceDoc p) 0 ps fill1 :: Bool -> RDoc -> Int -> [Doc] -> Doc fill1 g NoDoc k ys = NoDoc fill1 g (p `Union` q) k ys = fill1 g p k ys `union_` (aboveNest q False k (fill g ys)) fill1 g Empty k ys = mkNest k (fill g ys) fill1 g (Nest n p) k ys = nest_ n (fill1 g p (k - n) ys) fill1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (fill g ys)) fill1 g (TextBeside s sl p) k ys = textBeside_ 
s sl (fillNB g p (k - sl) ys) fillNB g (Nest _ p) k ys = fillNB g p k ys fillNB g Empty k [] = Empty fillNB g Empty k (y:ys) = nilBeside g (fill1 g (oneLiner (reduceDoc y)) k1 ys) `mkUnion` nilAboveNest False k (fill g (y:ys)) where k1 | g = k - 1 | otherwise = k fillNB g p k ys = fill1 g p k ys \end{code} ********************************************************* * * \subsection{Selecting the best layout} * * ********************************************************* \begin{code} best :: Mode -> Int -- Line length -> Int -- Ribbon length -> RDoc -> RDoc -- No unions in here! best OneLineMode w r p = get p where get Empty = Empty get NoDoc = NoDoc get (NilAbove p) = nilAbove_ (get p) get (TextBeside s sl p) = textBeside_ s sl (get p) get (Nest k p) = get p -- Elide nest get (p `Union` q) = first (get p) (get q) best mode w r p = get w p where get :: Int -- (Remaining) width of line -> Doc -> Doc get w Empty = Empty get w NoDoc = NoDoc get w (NilAbove p) = nilAbove_ (get w p) get w (TextBeside s sl p) = textBeside_ s sl (get1 w sl p) get w (Nest k p) = nest_ k (get (w - k) p) get w (p `Union` q) = nicest w r (get w p) (get w q) get1 :: Int -- (Remaining) width of line -> Int -- Amount of first line already eaten up -> Doc -- This is an argument to TextBeside => eat Nests -> Doc -- No unions in here! get1 w sl Empty = Empty get1 w sl NoDoc = NoDoc get1 w sl (NilAbove p) = nilAbove_ (get (w - sl) p) get1 w sl (TextBeside t tl p) = textBeside_ t tl (get1 w (sl + tl) p) get1 w sl (Nest k p) = get1 w sl p get1 w sl (p `Union` q) = nicest1 w r sl (get1 w sl p) (get1 w sl q) nicest w r p q = nicest1 w r 0 p q nicest1 w r sl p q | fits ((w `minn` r) - sl) p = p | otherwise = q fits :: Int -- Space available -> Doc -> Bool -- True if *first line* of Doc fits in space available fits n p | n < 0 = False fits n NoDoc = False fits n Empty = True fits n (NilAbove _) = True fits n (TextBeside _ sl p) = fits (n - sl) p minn x y | x < y = x | otherwise = y \end{code} @first@ and @nonEmptySet@ are similar to @nicest@ and @fits@, only simpler. @first@ returns its first argument if it is non-empty, otherwise its second. \begin{code} first p q | nonEmptySet p = p | otherwise = q nonEmptySet NoDoc = False nonEmptySet (p `Union` q) = True nonEmptySet Empty = True nonEmptySet (NilAbove p) = True -- NoDoc always in first line nonEmptySet (TextBeside _ _ p) = nonEmptySet p nonEmptySet (Nest _ p) = nonEmptySet p \end{code} @oneLiner@ returns the one-line members of the given set of @Doc@s. 
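To see the effect of this layout selection in use, here is a small,
self-contained sketch (the module name, the line widths and the identifiers
@callDoc@, @renderAt@ and @demo@ are arbitrary, and the code is not part of
this library): the same document is rendered at two line lengths, so the
one-line layout produced by @sep@ is chosen when it fits within the ribbon
and the stacked layout is chosen otherwise.
\begin{verbatim}
module PrettyDemo where

import Pretty

-- A call expression: all on one line if it fits,
-- otherwise the arguments stacked under the function name.
callDoc :: Doc
callDoc = hang (text "frobnicate") 2
               (sep [text "argumentOne", text "argumentTwo", text "argumentThree"])

-- Render at a given line length, building an ordinary String.
-- (In this version of the library, PStr simply carries a String.)
renderAt :: Int -> Doc -> String
renderAt lineLen = fullRender PageMode lineLen 1.5 txt ""
  where txt (Chr c)   s  = c : s
        txt (Str s1)  s2 = s1 ++ s2
        txt (PStr s1) s2 = s1 ++ s2

demo :: IO ()
demo = do putStrLn (renderAt 80 callDoc)   -- wide: everything on one line
          putStrLn (renderAt 25 callDoc)   -- narrow: arguments stacked, nested 2
\end{verbatim}
At the wider setting @fits@ accepts the one-line member of the @Union@ that
@sep@ builds; at the narrower one it is rejected, and the vertical layout
(with the arguments nested two columns in) is used instead.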
\begin{code} oneLiner :: Doc -> Doc oneLiner NoDoc = NoDoc oneLiner Empty = Empty oneLiner (NilAbove p) = NoDoc oneLiner (TextBeside s sl p) = textBeside_ s sl (oneLiner p) oneLiner (Nest k p) = nest_ k (oneLiner p) oneLiner (p `Union` q) = oneLiner p \end{code} ********************************************************* * * \subsection{Displaying the best layout} * * ********************************************************* \begin{code} {- renderStyle Style{mode, lineLength, ribbonsPerLine} doc = fullRender mode lineLength ribbonsPerLine doc "" -} render doc = showDoc doc "" showDoc doc rest = fullRender PageMode 100 1.5 string_txt rest doc string_txt (Chr c) s = c:s string_txt (Str s1) s2 = s1 ++ s2 string_txt (PStr s1) s2 = s1 ++ s2 \end{code} \begin{code} fullRender OneLineMode _ _ txt end doc = easy_display space_text txt end (reduceDoc doc) fullRender LeftMode _ _ txt end doc = easy_display nl_text txt end (reduceDoc doc) fullRender mode line_length ribbons_per_line txt end doc = display mode line_length ribbon_length txt end best_doc where best_doc = best mode hacked_line_length ribbon_length (reduceDoc doc) hacked_line_length, ribbon_length :: Int ribbon_length = round (fromIntegral line_length / ribbons_per_line) hacked_line_length = case mode of { ZigZagMode -> maxBound; other -> line_length } display mode page_width ribbon_width txt end doc = case page_width - ribbon_width of { gap_width -> case gap_width `quot` 2 of { shift -> let lay k (Nest k1 p) = lay (k + k1) p lay k Empty = end lay k (NilAbove p) = nl_text `txt` lay k p lay k (TextBeside s sl p) = case mode of ZigZagMode | k >= gap_width -> nl_text `txt` ( Str (multi_ch shift '/') `txt` ( nl_text `txt` ( lay1 (k - shift) s sl p))) | k < 0 -> nl_text `txt` ( Str (multi_ch shift '\\') `txt` ( nl_text `txt` ( lay1 (k + shift) s sl p ))) other -> lay1 k s sl p lay1 k s sl p = Str (indent k) `txt` (s `txt` lay2 (k + sl) p) lay2 k (NilAbove p) = nl_text `txt` lay k p lay2 k (TextBeside s sl p) = s `txt` (lay2 (k + sl) p) lay2 k (Nest _ p) = lay2 k p lay2 k Empty = end in lay 0 doc }} cant_fail = error "easy_display: NoDoc" easy_display nl_text txt end doc = lay doc cant_fail where lay NoDoc no_doc = no_doc lay (Union p q) no_doc = {- lay p -} (lay q cant_fail) -- Second arg can't be NoDoc lay (Nest k p) no_doc = lay p no_doc lay Empty no_doc = end lay (NilAbove p) no_doc = nl_text `txt` lay p cant_fail -- NoDoc always on first line lay (TextBeside s sl p) no_doc = s `txt` lay p no_doc indent n | n >= 8 = '\t' : indent (n - 8) | otherwise = spaces n multi_ch 0 ch = "" multi_ch n ch = ch : multi_ch (n - 1) ch spaces 0 = "" spaces n = ' ' : spaces (n - 1) \end{code} DrIFT-2.2.3/src/Makefile.am0000644000076400007640000000337710753606147012225 00000000000000 AUTOMAKE_OPTIONS = no-dependencies foreign subdir-objects RULES= Rules/Arbitrary.hs Rules/Binary.hs Rules/BitsBinary.hs Rules/FunctorM.hs \ Rules/Generic.hs Rules/GhcBinary.hs Rules/Monoid.hs Rules/Standard.hs \ Rules/Utility.hs Rules/Xml.hs bin_PROGRAMS = DrIFT DrIFT_SOURCES = ${RULES} ChaseImports.hs CommandP.hs DataP.lhs DrIFT.hs GetOpt.hs \ Unlit.hs ParseLib2.hs PreludData.hs Pretty.lhs RuleUtils.hs Version.hs GenUtil.hs # DrIFT_static_SOURCES = $(DrIFT_SOURCES) # DrIFT_static_LINK = $(DrIFT_LINK) -static -optl-static -ldl EXTRA_DIST = Rules.hs collect_rules.sh # EXTRA_PROGRAMS = DrIFT_static BUILT_SOURCES=Rules.hs DrIFT_LINK = $(HC) $(HCFLAGS) -o $@ Rules.hs: collect_rules.sh sh ./collect_rules.sh > $@ SUFFIXES = .hs .lhs .o .hi # COMPILE = $(HC) $(HCFLAGS) # all: DrIFT DrIFT: 
$(DrIFT_SOURCES) $(nodist_DrIFT_SOURCES) $(HC) $(HCFLAGS) -i. -i@srcdir@ -hidir . -odir . -o $@ --make @srcdir@/DrIFT.hs #.hs.o: # $(HC) -O -i. -i@srcdir@ $(HCFLAGS) -hidir . -o $@ -c $< #.lhs.o: # $(HC) -O -i. -i@srcdir@ $(HCFLAGS) -hidir . -o $@ -c $< #%.hi: %.o # @: clean: rm -f -- *.hi *.o *_hsc.c *_hsc.h distclean-local: rm -f -- *.hi *.o *_hsc.c *_hsc.h config.log config.status #depend: # ghc -M -optdep-f -optdepdepend.make DrIFT.hs #depend.make: # ghc -M -optdep-f -optdepdepend.make DrIFT.hs BINDISTNAME=$(PACKAGE)-$(VERSION)-`uname -s`-`uname -m` # $(PACKAGE)_static bin-dist: $(PACKAGE) $(RM) -rf -- $(BINDISTNAME) mkdir $(BINDISTNAME) strip -- $(PACKAGE) || true #strip -- $(PACKAGE)_static || true cp -- $(PACKAGE) $(BINDISTNAME) #cp -- $(PACKAGE)_static $(BINDISTNAME) tar cvf $(BINDISTNAME).tar $(BINDISTNAME) gzip -f -- $(BINDISTNAME).tar $(RM) -rf -- $(BINDISTNAME) CLEANFILES = Rules/*.o Rules/*.hi #-include depend.make #.PHONY: depend DrIFT-2.2.3/src/DataP.lhs0000644000076400007640000001062010753606147011657 00000000000000Adaptation and extension of a parser for data definitions given in appendix of G. Huttons's paper - Monadic Parser Combinators. Parser does not accept infix data constructors. This is a shortcoming that needs to be fixed. >module DataP (Statement(..),Data(..),Type(..),Body(..), > Name,Var,Class,Constructor, > datadecl,newtypedecl) >where >import ParseLib2 >import Char >import List >import Monad >data Statement = DataStmt | NewTypeStmt deriving (Eq,Show) >data Data = D { name :: Name, -- type name > constraints :: [(Class,Var)], > vars :: [Var], -- Parameters > body :: [Body], > derives :: [Class], -- derived classes > statement :: Statement} > | Directive > | TypeName Name > deriving (Eq,Show) >data Body = Body { constructor :: Constructor, > labels :: [Name], > types :: [Type]} deriving (Eq,Show) >type Name = String >type Var = String >type Class = String >type Constructor = String >---------------------------------------------------------------------------- > >datadecl :: Parser Data >datadecl = do > symbol "data" > con <- opt constraint > x <- constructorP > xs <- many variable > symbol "=" > b <- (conrecdecl +++ infixdecl) `sepby1` symbol "|" > d <- opt deriveP > return $D x con xs b d DataStmt >newtypedecl :: Parser Data >newtypedecl = do > symbol "newtype" > con <- opt constraint > x <- constructorP > xs <- many variable > symbol "=" > b <- conrecdecl > d <- opt deriveP > return $ D x con xs [b] d NewTypeStmt >--------------------------------------------------------------------------- >constructorP = token $ > do {x <- upper;xs <- many alphanum;return (x:xs)} +++ do > string "(:" > y <- many1 $ sat (\x -> (not . isAlphaNum) x && (not . isSpace) x && (not . (== ')')) x ) > char ')' > return ("(:" ++ y ++ ")") > >infixconstr = token $ do > x <- char ':' > y <- many1 $ sat (\x -> (not . isAlphaNum) x && (not . 
isSpace) x) > return (x:y) > >variable = identifier [ "data","deriving","newtype", "type", > "instance", "class", "module", "import", > "infixl", "infix","infixr", "default"] >condecl = do > x <- constructorP > ts <- many type2 > return $ Body x [] ts >conrecdecl = do > x <- constructorP > (ls,ts) <- record +++ fmap (\a -> ([],a)) (many type2) > return $ Body x ls ts >-- haven't worked infixes into the program yet, as they cause problems >-- throughout >infixdecl = do > t1 <- type2 > x <- infixconstr > ts <- many1 type2 > return $ Body ("(" ++ x ++ ")") [] (t1:ts) >record = do > symbol "{" > (ls,ts) <- fmap unzip $ rectype `sepby1` symbol "," > symbol "}" > return (ls,ts) >constraint = do{x <- constrs; symbol "=>"; return x} > where > constrs = fmap (\x -> [x]) one +++ > bracket (symbol "(") (one `sepby` symbol ",") (symbol ")") > one = do{c <- constructorP; v <- variable; return (c,v)} >deriveP = do{symbol "deriving"; one +++ more} > where > one = fmap (\x -> [x]) constructorP -- well, it has the same form > more = bracket (symbol "(") > (((type0 >>= return . show)) `sepby` symbol ",") > (symbol ")") >--------------------------------------------------------------------------- >data Type = Arrow Type Type -- fn > | LApply Type [Type] -- proper application > | Var String -- variable > | Con String -- constructor > | Tuple [Type] -- tuple > | List Type -- list > deriving (Eq) >instance Show Type where > show (Var s) = s > show (Con s) = s > show (Tuple ts) = "(" ++ concat (intersperse "," (map show ts)) ++ ")" > show (List t) = "[" ++ show t ++ "]" > show (Arrow a b) = show a ++ " -> " ++ show b > show (LApply t ts) = concat $ intersperse " " (map show (t:ts)) >type0 :: Parser Type >type0 = type1 `chainr1` fmap (const Arrow) (symbol "->") >--type1 = type2 `chainl1` (return Apply) >type1 = (do c <- con > as <- many1 type2 > return (LApply c as)) +++ > type2 >type2 = (char '!') +++ return '!' >> var +++ con +++ list +++ tuple >var = fmap Var variable >con = fmap Con constructorP >list = fmap List $ bracket (symbol "[") > type0 > (symbol "]") >tuple = fmap f $ bracket (symbol "(") > (type0 `sepby` symbol ",") > (symbol ")") > where f [t] = t > f ts = Tuple ts >--record entry >rectype :: Parser (String,Type) >rectype = do > s <- variable > symbol "::" > t <- type0 > return (s,t) DrIFT-2.2.3/src/PreludData.hs0000644000076400007640000000245110753606147012542 00000000000000module PreludData where import DataP -- data types from prelude, so we can derive things for these -- as needed without parsing the whole prelude -- users may want to add commonly-used datatypes to this list, to save -- repeatedly searching for a type. The list data is generated using the -- 'test' rule on the required datatypes. 
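-- As an illustration of the encoding used below (this entry is an example
-- only, not part of the list): a user type such as
--
--     data Pair a b = Pair a b deriving (Eq)
--
-- would be represented as
--
--     D{name="Pair", constraints=[], vars=["a","b"],
--       body=[Body{constructor="Pair", labels=[], types=[Var "a", Var "b"]}],
--       derives=["Eq"], statement=DataStmt}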
preludeData :: [Data] preludeData = [ D{name="Bool",constraints=[],vars=[],body=[ Body{constructor="False",labels=[],types=[]}, Body{constructor="True",labels=[],types=[]}] ,derives=["Eq", "Ord", "Ix", "Enum", "Read", "Show", "Bounded"] ,statement=DataStmt}, D{name="Maybe",constraints=[],vars=["a"],body=[ Body{constructor="Just",labels=[],types=[Var "a"]}, Body{constructor="Nothing",labels=[],types=[]}] , derives=["Eq", "Ord", "Read", "Show"],statement=DataStmt}, D{name="Either",constraints=[],vars=["a", "b"],body=[ Body{constructor="Left",labels=[],types=[Var "a"]}, Body{constructor="Right",labels=[],types=[Var "b"]}], derives=["Eq", "Ord", "Read", "Show"],statement=DataStmt}, D{name="Ordering",constraints=[],vars=[],body=[ Body{constructor="LT",labels=[],types=[]}, Body{constructor="EQ",labels=[],types=[]}, Body{constructor="GT",labels=[],types=[]}], derives=["Eq", "Ord", "Ix", "Enum", "Read", "Show", "Bounded"], statement=DataStmt}] DrIFT-2.2.3/src/GenUtil.hs0000644000076400007640000004250310753606147012066 00000000000000 -- $Id: GenUtil.hs,v 1.30 2004/12/01 23:58:27 john Exp $ -- arch-tag: 835e46b7-8ffd-40a0-aaf9-326b7e347760 -- Copyright (c) 2002 John Meacham (john@foo.net) -- -- Permission is hereby granted, free of charge, to any person obtaining a -- copy of this software and associated documentation files (the -- "Software"), to deal in the Software without restriction, including -- without limitation the rights to use, copy, modify, merge, publish, -- distribute, sublicense, and/or sell copies of the Software, and to -- permit persons to whom the Software is furnished to do so, subject to -- the following conditions: -- -- The above copyright notice and this permission notice shall be included -- in all copies or substantial portions of the Software. -- -- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -- IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -- CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -- TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -- SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---------------------------------------- -- | This is a collection of random useful utility functions written in pure -- Haskell 98. In general, it trys to conform to the naming scheme put forth -- the haskell prelude and fill in the obvious omissions, as well as provide -- useful routines in general. To ensure maximum portability, no instances are -- exported so it may be added to any project without conflicts. 
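-- A few illustrative uses of functions defined below (the expected results
-- follow from their definitions; these lines are examples only):
--
--   snub [3,1,3,2,1]                    ==> [1,2,3]
--   snubFst [(1,'a'),(2,'b'),(1,'c')]   ==> [(1,'a'),(2,'b')]
--   concatInter ", " ["a","b","c"]      ==> "a, b, c"
--   liftT2 (succ, reverse) (1, "ab")    ==> (2, "ba")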
---------------------------------------- module GenUtil( -- * Functions -- ** Error reporting putErr,putErrLn,putErrDie, -- ** Simple deconstruction fromLeft,fromRight,fsts,snds,splitEither,rights,lefts, -- ** System routines exitSuccess, System.exitFailure, epoch, lookupEnv,endOfTime, -- ** Random routines repMaybe, liftT2, liftT3, liftT4, snub, snubFst, sortFst, groupFst, foldl', fmapLeft,fmapRight,isDisjoint,isConjoint, groupUnder, sortUnder, sortGroupUnder, sortGroupUnderF, -- ** Monad routines repeatM, repeatM_, replicateM, replicateM_, maybeToMonad, toMonadM, ioM, ioMp, foldlM, foldlM_, foldl1M, foldl1M_, -- ** Text Routines -- *** Quoting shellQuote, simpleQuote, simpleUnquote, -- *** Random concatInter, powerSet, indentLines, buildTableLL, buildTableRL, randomPermute, randomPermuteIO, trimBlankLines, paragraph, paragraphBreak, expandTabs, chunk, chunkText, rtup, triple, fromEither, mapFst, mapSnd, mapFsts, mapSnds, tr, readHex, overlaps, showDuration, getArgContents, readM, readsM, split, tokens, -- * Classes UniqueProducer(..) ) where import Char(isAlphaNum, isSpace, toLower, ord) import List(group,sort) import List(intersperse, sortBy, groupBy) import Monad import qualified IO import qualified System import Random(StdGen, newStdGen, Random(randomR)) import Time {-# SPECIALIZE snub :: [String] -> [String] #-} {-# SPECIALIZE snub :: [Int] -> [Int] #-} -- | sorted nub of list, much more efficient than nub, but doesnt preserve ordering. snub :: Ord a => [a] -> [a] snub = map head . group . sort -- | sorted nub of list of tuples, based solely on the first element of each tuple. snubFst :: Ord a => [(a,b)] -> [(a,b)] snubFst = map head . groupBy (\(x,_) (y,_) -> x == y) . sortBy (\(x,_) (y,_) -> compare x y) -- | sort list of tuples, based on first element of each tuple. sortFst :: Ord a => [(a,b)] -> [(a,b)] sortFst = sortBy (\(x,_) (y,_) -> compare x y) -- | group list of tuples, based only on equality of the first element of each tuple. groupFst :: Eq a => [(a,b)] -> [[(a,b)]] groupFst = groupBy (\(x,_) (y,_) -> x == y) groupUnder f = groupBy (\x y -> f x == f y) sortUnder f = sortBy (\x y -> f x `compare` f y) sortGroupUnder f = groupUnder f . sortUnder f sortGroupUnderF f xs = [ (f x, xs) | xs@(x:_) <- sortGroupUnder f xs] -- | write string to standard error putErr :: String -> IO () putErr = IO.hPutStr IO.stderr -- | write string and newline to standard error putErrLn :: String -> IO () putErrLn s = putErr (s ++ "\n") -- | write string and newline to standard error, -- then exit program with failure. putErrDie :: String -> IO a putErrDie s = putErrLn s >> System.exitFailure -- | exit program successfully. 'exitFailure' is -- also exported from System. exitSuccess :: IO a exitSuccess = System.exitWith System.ExitSuccess {-# INLINE fromRight #-} fromRight :: Either a b -> b fromRight (Right x) = x fromRight _ = error "fromRight" {-# INLINE fromLeft #-} fromLeft :: Either a b -> a fromLeft (Left x) = x fromLeft _ = error "fromLeft" -- | recursivly apply function to value until it returns Nothing repMaybe :: (a -> Maybe a) -> a -> a repMaybe f e = case f e of Just e' -> repMaybe f e' Nothing -> e {-# INLINE liftT2 #-} {-# INLINE liftT3 #-} {-# INLINE liftT4 #-} liftT4 (f1,f2,f3,f4) (v1,v2,v3,v4) = (f1 v1, f2 v2, f3 v3, f4 v4) liftT3 (f,g,h) (x,y,z) = (f x, g y, h z) -- | apply functions to values inside a tupele. 'liftT3' and 'liftT4' also exist. liftT2 :: (a -> b, c -> d) -> (a,c) -> (b,d) liftT2 (f,g) (x,y) = (f x, g y) -- | class for monads which can generate -- unique values. 
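-- For instance, a state monad carrying an 'Int' counter can supply unique
-- values; a sketch only, assuming a MonadState-style 'get' and 'put':
--
-- @
-- instance UniqueProducer (State Int) where
--     newUniq = do { n <- get; put (n + 1); return n }
-- @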
class Monad m => UniqueProducer m where -- | produce a new unique value newUniq :: m Int -- peekUniq :: m Int -- modifyUniq :: (Int -> Int) -> m () -- newUniq = do -- v <- peekUniq -- modifyUniq (+1) -- return v rtup a b = (b,a) triple a b c = (a,b,c) -- | the standard unix epoch epoch :: ClockTime epoch = toClockTime $ CalendarTime { ctYear = 1970, ctMonth = January, ctDay = 0, ctHour = 0, ctMin = 0, ctSec = 0, ctTZ = 0, ctPicosec = 0, ctWDay = undefined, ctYDay = undefined, ctTZName = undefined, ctIsDST = undefined} -- | an arbitrary time in the future endOfTime :: ClockTime endOfTime = toClockTime $ CalendarTime { ctYear = 2020, ctMonth = January, ctDay = 0, ctHour = 0, ctMin = 0, ctSec = 0, ctTZ = 0, ctPicosec = 0, ctWDay = undefined, ctYDay = undefined, ctTZName = undefined, ctIsDST = undefined} {-# INLINE fsts #-} -- | take the fst of every element of a list fsts :: [(a,b)] -> [a] fsts = map fst {-# INLINE snds #-} -- | take the snd of every element of a list snds :: [(a,b)] -> [b] snds = map snd {-# INLINE repeatM #-} {-# SPECIALIZE repeatM :: IO a -> IO [a] #-} repeatM :: Monad m => m a -> m [a] repeatM x = sequence $ repeat x {-# INLINE repeatM_ #-} {-# SPECIALIZE repeatM_ :: IO a -> IO () #-} repeatM_ :: Monad m => m a -> m () repeatM_ x = sequence_ $ repeat x {-# INLINE replicateM #-} {-# SPECIALIZE replicateM :: Int -> IO a -> IO [a] #-} replicateM :: Monad m => Int -> m a -> m [a] replicateM n x = sequence $ replicate n x {-# INLINE replicateM_ #-} {-# SPECIALIZE replicateM_ :: Int -> IO a -> IO () #-} replicateM_ :: Monad m => Int -> m a -> m () replicateM_ n x = sequence_ $ replicate n x {-# SPECIALIZE maybeToMonad :: Maybe a -> IO a #-} -- | convert a maybe to an arbitrary failable monad maybeToMonad :: Monad m => Maybe a -> m a maybeToMonad (Just x) = return x maybeToMonad Nothing = fail "Nothing" toMonadM :: Monad m => m (Maybe a) -> m a toMonadM action = join $ liftM maybeToMonad action foldlM :: Monad m => (a -> b -> m a) -> a -> [b] -> m a foldlM f v (x:xs) = (f v x) >>= \a -> foldlM f a xs foldlM _ v [] = return v foldl1M :: Monad m => (a -> a -> m a) -> [a] -> m a foldl1M f (x:xs) = foldlM f x xs foldl1M _ _ = error "foldl1M" foldlM_ :: Monad m => (a -> b -> m a) -> a -> [b] -> m () foldlM_ f v xs = foldlM f v xs >> return () foldl1M_ ::Monad m => (a -> a -> m a) -> [a] -> m () foldl1M_ f xs = foldl1M f xs >> return () -- | partition a list of eithers. splitEither :: [Either a b] -> ([a],[b]) splitEither (r:rs) = case splitEither rs of (xs,ys) -> case r of Left x -> (x:xs,ys) Right y -> (xs,y:ys) splitEither [] = ([],[]) fromEither :: Either a a -> a fromEither (Left x) = x fromEither (Right x) = x {-# INLINE mapFst #-} {-# INLINE mapSnd #-} mapFst f (x,y) = (f x, y) mapSnd g (x,y) = ( x,g y) {-# INLINE mapFsts #-} {-# INLINE mapSnds #-} mapFsts f xs = [(f x, y) | (x,y) <- xs] mapSnds g xs = [(x, g y) | (x,y) <- xs] {-# INLINE rights #-} -- | take just the rights rights :: [Either a b] -> [b] rights xs = [x | Right x <- xs] {-# INLINE lefts #-} -- | take just the lefts lefts :: [Either a b] -> [a] lefts xs = [x | Left x <- xs] ioM :: Monad m => IO a -> IO (m a) ioM action = catch (fmap return action) (\e -> return (fail (show e))) ioMp :: MonadPlus m => IO a -> IO (m a) ioMp action = catch (fmap return action) (\_ -> return mzero) -- | reformat a string to not be wider than a given width, breaking it up -- between words. 
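-- e.g. (per the definition below; note the trailing newline in the result):
--
-- @
-- > paragraph 15 "hello world again"
-- "hello world\nagain\n"
-- @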
paragraph :: Int -> String -> String paragraph maxn xs = drop 1 (f maxn (words xs)) where f n (x:xs) | lx < n = (' ':x) ++ f (n - lx) xs where lx = length x + 1 f _ (x:xs) = '\n': (x ++ f (maxn - length x) xs) f _ [] = "\n" chunk :: Int -> [a] -> [[a]] chunk mw s | length s < mw = [s] chunk mw s = case splitAt mw s of (a,b) -> a : chunk mw b chunkText :: Int -> String -> String chunkText mw s = concatMap (unlines . chunk mw) $ lines s {- paragraphBreak :: Int -> String -> String paragraphBreak maxn xs = unlines (map ( unlines . map (unlines . chunk maxn) . lines . f maxn ) $ lines xs) where f _ "" = "" f n xs | length ss > 0 = if length ss + r rs > n then '\n':f maxn rs else ss where (ss,rs) = span isSpace xs f n xs = ns ++ f (n - length ns) rs where (ns,rs) = span (not . isSpace) xs r xs = length $ fst $ span (not . isSpace) xs -} paragraphBreak :: Int -> String -> String paragraphBreak maxn xs = unlines $ (map f) $ lines xs where f s | length s <= maxn = s f s | isSpace (head b) = a ++ "\n" ++ f (dropWhile isSpace b) | all (not . isSpace) a = a ++ "\n" ++ f b | otherwise = reverse (dropWhile isSpace sa) ++ "\n" ++ f (reverse ea ++ b) where (ea, sa) = span (not . isSpace) $ reverse a (a,b) = splitAt maxn s expandTabs' :: Int -> Int -> String -> String expandTabs' 0 _ s = filter (/= '\t') s expandTabs' sz off ('\t':s) = replicate len ' ' ++ expandTabs' sz (off + len) s where len = (sz - (off `mod` sz)) expandTabs' sz _ ('\n':s) = '\n': expandTabs' sz 0 s expandTabs' sz off (c:cs) = c: expandTabs' sz (off + 1) cs expandTabs' _ _ "" = "" -- | expand tabs into spaces in a string expandTabs s = expandTabs' 8 0 s tr :: String -> String -> String -> String tr as bs s = map (f as bs) s where f (a:_) (b:_) c | a == c = b f (_:as) (_:bs) c = f as bs c f [] [] c = c f _ _ _ = error "invalid tr" -- | quote strings 'rc' style. single quotes protect any characters between -- them, to get an actual single quote double it up. Inverse of 'simpleUnquote' simpleQuote :: [String] -> String simpleQuote ss = unwords (map f ss) where f s | any isBad s = "'" ++ dquote s ++ "'" f s = s dquote s = concatMap (\c -> if c == '\'' then "''" else [c]) s isBad c = isSpace c || c == '\'' -- | inverse of 'simpleQuote' simpleUnquote :: String -> [String] simpleUnquote s = f (dropWhile isSpace s) where f [] = [] f ('\'':xs) = case quote' "" xs of (x,y) -> x:f (dropWhile isSpace y) f xs = case span (not . isSpace) xs of (x,y) -> x:f (dropWhile isSpace y) quote' a ('\'':'\'':xs) = quote' ('\'':a) xs quote' a ('\'':xs) = (reverse a, xs) quote' a (x:xs) = quote' (x:a) xs quote' a [] = (reverse a, "") -- | quote a set of strings as would be appropriate to pass them as -- arguments to a 'sh' style shell shellQuote :: [String] -> String shellQuote ss = unwords (map f ss) where f s | any (not . isGood) s = "'" ++ dquote s ++ "'" f s = s dquote s = concatMap (\c -> if c == '\'' then "'\\''" else [c]) s isGood c = isAlphaNum c || c `elem` "@/." -- | looks up an enviornment variable and returns it in a 'MonadPlus' rather -- than raising an exception if the variable is not set. 
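-- For example, choosing 'Maybe' for the 'MonadPlus' (the value shown is
-- illustrative only):
--
-- @
-- > lookupEnv "HOME" :: IO (Maybe String)
-- Just "/home/user"    -- or Nothing when HOME is unset
-- @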
lookupEnv :: MonadPlus m => String -> IO (m String) lookupEnv s = catch (fmap return $ System.getEnv s) (\e -> if IO.isDoesNotExistError e then return mzero else ioError e) {-# SPECIALIZE fmapLeft :: (a -> c) -> [(Either a b)] -> [(Either c b)] #-} fmapLeft :: Functor f => (a -> c) -> f (Either a b) -> f (Either c b) fmapLeft fn = fmap f where f (Left x) = Left (fn x) f (Right x) = Right x {-# SPECIALIZE fmapRight :: (b -> c) -> [(Either a b)] -> [(Either a c)] #-} fmapRight :: Functor f => (b -> c) -> f (Either a b) -> f (Either a c) fmapRight fn = fmap f where f (Left x) = Left x f (Right x) = Right (fn x) {-# SPECIALIZE isDisjoint :: [String] -> [String] -> Bool #-} {-# SPECIALIZE isConjoint :: [String] -> [String] -> Bool #-} {-# SPECIALIZE isDisjoint :: [Int] -> [Int] -> Bool #-} {-# SPECIALIZE isConjoint :: [Int] -> [Int] -> Bool #-} -- | set operations on lists. (slow!) isDisjoint, isConjoint :: Eq a => [a] -> [a] -> Bool isConjoint xs ys = or [x == y | x <- xs, y <- ys] isDisjoint xs ys = not (isConjoint xs ys) -- | 'concat' composed with 'List.intersperse'. concatInter :: String -> [String] -> String concatInter x = concat . (intersperse x) -- | place spaces before each line in string. indentLines :: Int -> String -> String indentLines n s = unlines $ map (replicate n ' ' ++)$ lines s -- | trim blank lines at beginning and end of string trimBlankLines :: String -> String trimBlankLines cs = unlines $ reverse (tb $ reverse (tb (lines cs))) where tb = dropWhile (all isSpace) buildTableRL :: [(String,String)] -> [String] buildTableRL ps = map f ps where f (x,"") = x f (x,y) = replicate (bs - length x) ' ' ++ x ++ replicate 4 ' ' ++ y bs = maximum (map (length . fst) [ p | p@(_,_:_) <- ps ]) buildTableLL :: [(String,String)] -> [String] buildTableLL ps = map f ps where f (x,y) = x ++ replicate (bs - length x) ' ' ++ replicate 4 ' ' ++ y bs = maximum (map (length . fst) ps) {-# INLINE foldl' #-} -- | strict version of 'foldl' foldl' :: (a -> b -> a) -> a -> [b] -> a foldl' _ a [] = a foldl' f a (x:xs) = (foldl' f $! f a x) xs -- | randomly permute a list, using the standard random number generator. randomPermuteIO :: [a] -> IO [a] randomPermuteIO xs = newStdGen >>= \g -> return (randomPermute g xs) -- | randomly permute a list given a RNG randomPermute :: StdGen -> [a] -> [a] randomPermute _ [] = [] randomPermute gen xs = (head tl) : randomPermute gen' (hd ++ tail tl) where (idx, gen') = randomR (0,length xs - 1) gen (hd, tl) = splitAt idx xs -- | compute the power set of a list powerSet :: [a] -> [[a]] powerSet [] = [[]] powerSet (x:xs) = xss /\/ map (x:) xss where xss = powerSet xs -- | interleave two lists lazily, alternating elements from them. This can be used instead of concatination to avoid space leaks in certain situations. (/\/) :: [a] -> [a] -> [a] [] /\/ ys = ys (x:xs) /\/ ys = x : (ys /\/ xs) readHexChar a | a >= '0' && a <= '9' = return $ ord a - ord '0' readHexChar a | z >= 'a' && z <= 'f' = return $ 10 + ord z - ord 'a' where z = toLower a readHexChar x = fail $ "not hex char: " ++ [x] readHex :: Monad m => String -> m Int readHex [] = fail "empty string" readHex cs = mapM readHexChar cs >>= \cs' -> return (rh $ reverse cs') where rh (c:cs) = c + 16 * (rh cs) rh [] = 0 {-# SPECIALIZE overlaps :: (Int,Int) -> (Int,Int) -> Bool #-} -- | determine if two closed intervals overlap at all. 
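-- For example:
--
-- @
-- > (1,5) `overlaps` (4,9)
-- True
-- > (1,3) `overlaps` (4,9)
-- False
-- @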
overlaps :: Ord a => (a,a) -> (a,a) -> Bool (a,_) `overlaps` (_,y) | y < a = False (_,b) `overlaps` (x,_) | b < x = False _ `overlaps` _ = True -- | translate a number of seconds to a string representing the duration expressed. showDuration :: Integral a => a -> String showDuration x = st "d" dayI ++ st "h" hourI ++ st "m" minI ++ show secI ++ "s" where (dayI, hourI) = divMod hourI' 24 (hourI', minI) = divMod minI' 60 (minI',secI) = divMod x 60 st _ 0 = "" st c n = show n ++ c -- | behave like while(<>) in perl, go through the argument list, reading the -- concation of each file name mentioned or stdin if '-' is on it. If no -- arguments are given, read stdin. getArgContents = do as <- System.getArgs let f "-" = getContents f fn = readFile fn cs <- mapM f as if null as then getContents else return $ concat cs readM :: (Monad m, Read a) => String -> m a readM cs = case [x | (x,t) <- reads cs, ("","") <- lex t] of [x] -> return x [] -> fail "readM: no parse" _ -> fail "readM: ambiguous parse" readsM :: (Monad m, Read a) => String -> m (a,String) readsM cs = case readsPrec 0 cs of [(x,s)] -> return (x,s) _ -> fail "cannot readsM" -- | Splits a list into components delimited by separators, where the -- predicate returns True for a separator element. The resulting -- components do not contain the separators. Two adjacent separators -- result in an empty component in the output. eg. -- -- @ -- > split (=='a') "aabbaca" -- ["","","bb","c",""] -- @ split :: (a -> Bool) -> [a] -> [[a]] split p s = case rest of [] -> [chunk] _:rest -> chunk : split p rest where (chunk, rest) = break p s -- | Like 'split', except that sequences of adjacent separators are -- treated as a single separator. eg. -- -- @ -- > tokens (=='a') "aabbaca" -- ["bb","c"] -- @ tokens :: (a -> Bool) -> [a] -> [[a]] tokens p = filter (not.null) . split p DrIFT-2.2.3/README.old0000644000076400007640000000265410753606147011034 00000000000000This tar file contains: Makefile - to build DrIFT *.hs,*.lhs - DrIFT source code example/ - simple example files to test DrIFT on. docs/ - documentation in texinfo format History ------- Development Taken over by John Meacham (john@foo.net) in April of 2002 see ChangeLog for changes since 1.1 (Changes by Malcolm.Wallace@cs.york.ac.uk, 1999) Updated sources to Haskell 98. Added derivation of class Binary and class Haskell2Xml. Changed Main.main to place results on stdout instead of overwriting original file. Have not tried using Makefile recently; I use hmake instead. Building DrIFT -------------- depending on your system you should type one of the following: hmake DrIFT make Installation ------------ copy the 'DrIFT' executable to somewhere on your path set DERIVEPATH to the list of directories you wish to search for for modules / interfaces. DERIVEPATH is quite fussy about the format the list should take :- * each path should be separated by ':' * no space inserted anywhere * no final '/' on the end of a path e.g. 
good - /users/grad/nww/share/hugs/lib:/users/grad/nww/share/hugs/lib/hugs bad - /users/grad/nww/share/hugs/lib/:/users/grad/nww/share/hugs/lib/hugs/ Running DrIFT -------------- DrIFT foo.drift.hs -o foo.hs DrIFT foo.hs -r -o foo_derivations.hs Homepage -------- http://homer.netmar.com/~john/computer/haskell/DrIFT/ Authors ------- Noel Winstanley Malcolm Wallace Joost Visser John Meacham DrIFT-2.2.3/configure.ac0000644000076400007640000000135410753606147011661 00000000000000 AC_INIT([DrIFT],[2.2.3],john@repetae.net, [DrIFT]) AC_CONFIG_SRCDIR(src/DrIFT.hs) AC_CONFIG_MACRO_DIR(ac-macros) AC_CONFIG_AUX_DIR(ac-macros) AM_INIT_AUTOMAKE([foreign no-dependencies subdir-objects]) #AC_CANONICAL_HOST #AC_PROG_CC dnl Let aclocal use macros in ./config directory ACLOCAL="${ACLOCAL} -I ./ac-macros" AC_PROGRAM_REQUIRE(hc,ghc, [ --with-hc= Specify location of ghc.]) AC_ARG_WITH(hcflags, [ --with-hcflags=HCFLAGS specify flags for Haskell compiler], [HCFLAGS=$withval]) AC_PROG_INSTALL AC_PATH_PROGS(SH, sh) AC_SUBST(HC) AC_SUBST(HCFLAGS) AC_SUBST(PACKAGE) AC_OUTPUT( Makefile src/Makefile example/Makefile src/Version.hs DrIFT.spec drift-ghc ) DrIFT-2.2.3/Makefile.in0000644000076400007640000007356010753606466011454 00000000000000# Makefile.in generated by automake 1.10 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : subdir = . 
DIST_COMMON = README $(am__configure_deps) $(srcdir)/DrIFT.spec.in \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/docs/stamp-vti $(srcdir)/docs/version.texi \ $(srcdir)/drift-ghc.in $(top_srcdir)/configure AUTHORS \ ac-macros/install-sh ac-macros/mdate-sh ac-macros/missing \ ac-macros/texinfo.tex ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/ac-macros/acincludepackage.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_CLEAN_FILES = DrIFT.spec drift-ghc am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(infodir)" binSCRIPT_INSTALL = $(INSTALL_SCRIPT) SCRIPTS = $(bin_SCRIPTS) depcomp = am__depfiles_maybe = SOURCES = DIST_SOURCES = am__dirstamp = $(am__leading_dot)dirstamp INFO_DEPS = $(srcdir)/docs/drift.info TEXINFO_TEX = $(top_srcdir)/ac-macros/texinfo.tex am__TEXINFO_TEX_DIR = $(top_srcdir)/ac-macros DVIS = docs/drift.dvi PDFS = docs/drift.pdf PSS = docs/drift.ps HTMLS = docs/drift.html TEXINFOS = docs/drift.texi TEXI2DVI = texi2dvi TEXI2PDF = $(TEXI2DVI) --pdf --batch MAKEINFOHTML = $(MAKEINFO) --html AM_MAKEINFOHTMLFLAGS = $(AM_MAKEINFOFLAGS) DVIPS = dvips RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ { test ! -d $(distdir) \ || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -fr $(distdir); }; } DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best distuninstallcheck_listfiles = find . -type f -print distcleancheck_listfiles = find . 
-type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ HC = @HC@ HCFLAGS = @HCFLAGS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SH = @SH@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ am__leading_dot = @am__leading_dot@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = no-dependencies foreign ACLOCAL_AMFLAGS = -I ac-macros info_TEXINFOS = docs/drift.texi SUBDIRS = src example EXTRA_DIST = configure.ac \ Changelog README.old drift-ghc.in code \ code/FunctorM.hs code/GhcBinary.hs code/README.txt \ ac-macros/acincludepackage.m4 LICENSE docs/drift.html bin_SCRIPTS = drift-ghc PUBLISH_DIR = /home/john/public_html/computer/haskell/DrIFT PUBLISH_FILES = docs/drift.html docs/drift.ps drift-list.txt BINDISTNAME = @PACKAGE_NAME@-@VERSION@-`uname -s`-`uname -m` all: all-recursive .SUFFIXES: .SUFFIXES: .dvi .ps am--refresh: @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign --ignore-deps'; \ cd $(srcdir) && $(AUTOMAKE) --foreign --ignore-deps \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign --ignore-deps Makefile'; \ cd $(top_srcdir) && \ $(AUTOMAKE) --foreign --ignore-deps Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: $(am__configure_deps) cd $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) DrIFT.spec: $(top_builddir)/config.status $(srcdir)/DrIFT.spec.in cd $(top_builddir) && $(SHELL) ./config.status $@ drift-ghc: $(top_builddir)/config.status $(srcdir)/drift-ghc.in cd $(top_builddir) && $(SHELL) ./config.status $@ install-binSCRIPTS: $(bin_SCRIPTS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" @list='$(bin_SCRIPTS)'; for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f $$d$$p; then \ f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \ echo " $(binSCRIPT_INSTALL) '$$d$$p' '$(DESTDIR)$(bindir)/$$f'"; \ $(binSCRIPT_INSTALL) "$$d$$p" "$(DESTDIR)$(bindir)/$$f"; \ else :; fi; \ done uninstall-binSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(bin_SCRIPTS)'; for p in $$list; do \ f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \ echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ rm -f "$(DESTDIR)$(bindir)/$$f"; \ done docs/$(am__dirstamp): @$(MKDIR_P) docs @: > docs/$(am__dirstamp) $(srcdir)/docs/drift.info: docs/drift.texi $(srcdir)/docs/version.texi restore=: && backupdir="$(am__leading_dot)am$$$$" && \ am__cwd=`pwd` && cd $(srcdir) && \ rm -rf $$backupdir && mkdir $$backupdir && \ if ($(MAKEINFO) --version) >/dev/null 2>&1; then \ for f in $@ $@-[0-9] $@-[0-9][0-9] $(@:.info=).i[0-9] $(@:.info=).i[0-9][0-9]; do \ if test -f $$f; then mv $$f $$backupdir; restore=mv; else :; fi; \ done; \ else :; fi && \ cd "$$am__cwd"; \ if $(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I docs -I $(srcdir)/docs \ -o $@ $(srcdir)/docs/drift.texi; \ then \ rc=0; \ cd $(srcdir); \ else \ rc=$$?; \ cd $(srcdir) && \ $$restore $$backupdir/* `echo "./$@" | sed 's|[^/]*$$||'`; \ fi; \ rm -rf $$backupdir; exit $$rc docs/drift.dvi: docs/drift.texi $(srcdir)/docs/version.texi docs/$(am__dirstamp) TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I docs -I $(srcdir)/docs' \ $(TEXI2DVI) -o $@ `test -f 'docs/drift.texi' || echo '$(srcdir)/'`docs/drift.texi docs/drift.pdf: docs/drift.texi $(srcdir)/docs/version.texi docs/$(am__dirstamp) TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I docs -I $(srcdir)/docs' \ $(TEXI2PDF) -o $@ `test -f 'docs/drift.texi' || echo '$(srcdir)/'`docs/drift.texi $(srcdir)/docs/version.texi: $(srcdir)/docs/stamp-vti $(srcdir)/docs/stamp-vti: docs/drift.texi $(top_srcdir)/configure test -f docs/$(am__dirstamp) || $(MAKE) $(AM_MAKEFLAGS) docs/$(am__dirstamp) @(dir=.; test -f ./docs/drift.texi || dir=$(srcdir); \ set `$(SHELL) $(top_srcdir)/ac-macros/mdate-sh $$dir/docs/drift.texi`; \ echo "@set UPDATED $$1 $$2 $$3"; \ echo "@set UPDATED-MONTH $$2 $$3"; \ echo "@set EDITION $(VERSION)"; \ echo "@set VERSION $(VERSION)") > vti.tmp @cmp -s vti.tmp $(srcdir)/docs/version.texi \ || (echo "Updating $(srcdir)/docs/version.texi"; \ cp vti.tmp $(srcdir)/docs/version.texi) -@rm -f vti.tmp @cp $(srcdir)/docs/version.texi $@ mostlyclean-vti: -rm -f vti.tmp 
maintainer-clean-vti: -rm -f $(srcdir)/docs/stamp-vti $(srcdir)/docs/version.texi .dvi.ps: TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ $(DVIPS) -o $@ $< uninstall-dvi-am: @$(NORMAL_UNINSTALL) @list='$(DVIS)'; for p in $$list; do \ f=$(am__strip_dir) \ echo " rm -f '$(DESTDIR)$(dvidir)/$$f'"; \ rm -f "$(DESTDIR)$(dvidir)/$$f"; \ done uninstall-html-am: @$(NORMAL_UNINSTALL) @list='$(HTMLS)'; for p in $$list; do \ f=$(am__strip_dir) \ echo " rm -rf '$(DESTDIR)$(htmldir)/$$f'"; \ rm -rf "$(DESTDIR)$(htmldir)/$$f"; \ done uninstall-info-am: @$(PRE_UNINSTALL) @if test -d '$(DESTDIR)$(infodir)' && \ (install-info --version && \ install-info --version 2>&1 | sed 1q | grep -i -v debian) >/dev/null 2>&1; then \ list='$(INFO_DEPS)'; \ for file in $$list; do \ relfile=`echo "$$file" | sed 's|^.*/||'`; \ echo " install-info --info-dir='$(DESTDIR)$(infodir)' --remove '$(DESTDIR)$(infodir)/$$relfile'"; \ install-info --info-dir="$(DESTDIR)$(infodir)" --remove "$(DESTDIR)$(infodir)/$$relfile"; \ done; \ else :; fi @$(NORMAL_UNINSTALL) @list='$(INFO_DEPS)'; \ for file in $$list; do \ relfile=`echo "$$file" | sed 's|^.*/||'`; \ relfile_i=`echo "$$relfile" | sed 's|\.info$$||;s|$$|.i|'`; \ (if test -d "$(DESTDIR)$(infodir)" && cd "$(DESTDIR)$(infodir)"; then \ echo " cd '$(DESTDIR)$(infodir)' && rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]"; \ rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]; \ else :; fi); \ done uninstall-pdf-am: @$(NORMAL_UNINSTALL) @list='$(PDFS)'; for p in $$list; do \ f=$(am__strip_dir) \ echo " rm -f '$(DESTDIR)$(pdfdir)/$$f'"; \ rm -f "$(DESTDIR)$(pdfdir)/$$f"; \ done uninstall-ps-am: @$(NORMAL_UNINSTALL) @list='$(PSS)'; for p in $$list; do \ f=$(am__strip_dir) \ echo " rm -f '$(DESTDIR)$(psdir)/$$f'"; \ rm -f "$(DESTDIR)$(psdir)/$$f"; \ done dist-info: $(INFO_DEPS) @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ list='$(INFO_DEPS)'; \ for base in $$list; do \ case $$base in \ $(srcdir)/*) base=`echo "$$base" | sed "s|^$$srcdirstrip/||"`;; \ esac; \ if test -f $$base; then d=.; else d=$(srcdir); fi; \ base_i=`echo "$$base" | sed 's|\.info$$||;s|$$|.i|'`; \ for file in $$d/$$base $$d/$$base-[0-9] $$d/$$base-[0-9][0-9] $$d/$$base_i[0-9] $$d/$$base_i[0-9][0-9]; do \ if test -f $$file; then \ relfile=`expr "$$file" : "$$d/\(.*\)"`; \ test -f $(distdir)/$$relfile || \ cp -p $$file $(distdir)/$$relfile; \ else :; fi; \ done; \ done mostlyclean-aminfo: -rm -rf drift.aux drift.cp drift.cps drift.fn drift.fns drift.ky drift.kys \ drift.log drift.pg drift.pgs drift.tmp drift.toc drift.tp \ drift.tps drift.vr drift.vrs docs/drift.dvi docs/drift.pdf \ docs/drift.ps docs/drift.html maintainer-clean-aminfo: @list='$(INFO_DEPS)'; for i in $$list; do \ i_i=`echo "$$i" | sed 's|\.info$$||;s|$$|.i|'`; \ echo " rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]"; \ rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]; \ done # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. 
$(RECURSIVE_TARGETS): @failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) ' { files[$$0] = 1; } \ END { for (i in files) print i; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) ' { files[$$0] = 1; } \ END { for (i in files) print i; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$tags $$unique; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) ' { files[$$0] = 1; } \ END { for (i in files) print i; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && cd $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) $$here distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) $(am__remove_distdir) test -d $(distdir) || mkdir $(distdir) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ fi; \ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ else \ test -f $(distdir)/$$file \ || cp -p $$d/$$file $(distdir)/$$file \ || exit 1; \ fi; \ done list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ distdir=`$(am__cd) $(distdir) && pwd`; \ top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ (cd $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$top_distdir" \ distdir="$$distdir/$$subdir" \ am__remove_distdir=: \ am__skip_length_check=: \ distdir) \ || exit 1; \ fi; \ done $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$(top_distdir)" distdir="$(distdir)" \ dist-info -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r $(distdir) dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 $(am__remove_distdir) dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__remove_distdir) dist-shar: distdir shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__remove_distdir) dist dist-all: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir); chmod a+w $(distdir) mkdir $(distdir)/_build mkdir $(distdir)/_inst chmod a-w $(distdir) dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && cd $(distdir)/_build \ && ../configure --srcdir=.. --prefix="$$dc_install_base" \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck $(am__remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @cd $(distuninstallcheck_dir) \ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile $(INFO_DEPS) $(SCRIPTS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(infodir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -rm -f docs/$(am__dirstamp) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: $(DVIS) html: html-recursive html-am: $(HTMLS) info: info-recursive info-am: $(INFO_DEPS) install-data-am: install-info-am install-dvi: install-dvi-recursive install-dvi-am: $(DVIS) @$(NORMAL_INSTALL) test -z "$(dvidir)" || $(MKDIR_P) "$(DESTDIR)$(dvidir)" @list='$(DVIS)'; for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ f=$(am__strip_dir) \ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(dvidir)/$$f'"; \ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(dvidir)/$$f"; \ done install-exec-am: install-binSCRIPTS install-html: install-html-recursive install-html-am: $(HTMLS) @$(NORMAL_INSTALL) test -z "$(htmldir)" || $(MKDIR_P) "$(DESTDIR)$(htmldir)" @list='$(HTMLS)'; for p in $$list; do \ if test -f "$$p" || test -d "$$p"; then d=; else d="$(srcdir)/"; fi; \ f=$(am__strip_dir) \ if test -d "$$d$$p"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)/$$f'"; \ $(MKDIR_P) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \ echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \ $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f"; \ else \ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \ fi; \ done install-info: install-info-recursive install-info-am: $(INFO_DEPS) @$(NORMAL_INSTALL) test -z "$(infodir)" || $(MKDIR_P) "$(DESTDIR)$(infodir)" @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ list='$(INFO_DEPS)'; \ for file in $$list; do \ case $$file in \ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ esac; \ if test -f $$file; then d=.; else d=$(srcdir); fi; \ file_i=`echo "$$file" | sed 's|\.info$$||;s|$$|.i|'`; \ for ifile in $$d/$$file $$d/$$file-[0-9] $$d/$$file-[0-9][0-9] \ $$d/$$file_i[0-9] $$d/$$file_i[0-9][0-9] ; do \ if test -f $$ifile; then \ relfile=`echo "$$ifile" | sed 's|^.*/||'`; \ echo " $(INSTALL_DATA) '$$ifile' '$(DESTDIR)$(infodir)/$$relfile'"; \ $(INSTALL_DATA) "$$ifile" "$(DESTDIR)$(infodir)/$$relfile"; \ else : ; fi; \ done; \ done @$(POST_INSTALL) @if (install-info --version && \ install-info --version 2>&1 
| sed 1q | grep -i -v debian) >/dev/null 2>&1; then \ list='$(INFO_DEPS)'; \ for file in $$list; do \ relfile=`echo "$$file" | sed 's|^.*/||'`; \ echo " install-info --info-dir='$(DESTDIR)$(infodir)' '$(DESTDIR)$(infodir)/$$relfile'";\ install-info --info-dir="$(DESTDIR)$(infodir)" "$(DESTDIR)$(infodir)/$$relfile" || :;\ done; \ else : ; fi install-man: install-pdf: install-pdf-recursive install-pdf-am: $(PDFS) @$(NORMAL_INSTALL) test -z "$(pdfdir)" || $(MKDIR_P) "$(DESTDIR)$(pdfdir)" @list='$(PDFS)'; for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ f=$(am__strip_dir) \ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(pdfdir)/$$f'"; \ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(pdfdir)/$$f"; \ done install-ps: install-ps-recursive install-ps-am: $(PSS) @$(NORMAL_INSTALL) test -z "$(psdir)" || $(MKDIR_P) "$(DESTDIR)$(psdir)" @list='$(PSS)'; for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ f=$(am__strip_dir) \ echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(psdir)/$$f'"; \ $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(psdir)/$$f"; \ done installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-aminfo \ maintainer-clean-generic maintainer-clean-vti mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-aminfo mostlyclean-generic mostlyclean-vti pdf: pdf-recursive pdf-am: $(PDFS) ps: ps-recursive ps-am: $(PSS) uninstall-am: uninstall-binSCRIPTS uninstall-dvi-am uninstall-html-am \ uninstall-info-am uninstall-pdf-am uninstall-ps-am .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ install-strip .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-generic \ ctags ctags-recursive dist dist-all dist-bzip2 dist-gzip \ dist-info dist-shar dist-tarZ dist-zip distcheck distclean \ distclean-generic distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-binSCRIPTS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-aminfo maintainer-clean-generic \ maintainer-clean-vti mostlyclean mostlyclean-aminfo \ mostlyclean-generic mostlyclean-vti pdf pdf-am ps ps-am tags \ tags-recursive uninstall uninstall-am uninstall-binSCRIPTS \ uninstall-dvi-am uninstall-html-am uninstall-info-am \ uninstall-pdf-am uninstall-ps-am docs/drift.html: docs/drift.texi texi2html -monolithic $< -o $@ bin-dist: (cd ./src; $(MAKE) bin-dist) mv ./src/$(BINDISTNAME).tar.gz . 
drift-list.txt: src/DrIFT src/DrIFT -l > drift-list.txt publish: drift-list.txt docs bin-dist dist # rpm rm -f -- $(PUBLISH_DIR)/drop/$(PACKAGE_NAME)-*-*-*.tar.gz # rm -f -- $(PUBLISH_DIR)/drop/$(PACKAGE)-*.rpm rm -f -- "$(PUBLISH_DIR)/drop/$(PACKAGE_NAME)-$(VERSION).tar.gz" cp -- "$(BINDISTNAME).tar.gz" "$(PUBLISH_DIR)/drop" cp -- "$(PACKAGE_NAME)-$(VERSION).tar.gz" "$(PUBLISH_DIR)/drop" cp -- $(PUBLISH_FILES) $(PUBLISH_DIR) # cp -- "$(HOME)/redhat/RPMS/i386/$(PACKAGE)-$(VERSION)-1.i386.rpm" "$(PUBLISH_DIR)/drop" # cp -- "$(HOME)/redhat/SRPMS/$(PACKAGE)-$(VERSION)-1.src.rpm" "$(PUBLISH_DIR)/drop" cp -- Changelog $(PUBLISH_DIR) make -C $(PUBLISH_DIR) || true rpm: depend $(PACKAGE_NAME).spec dist cp $(PACKAGE_NAME)-$(VERSION).tar.gz $(HOME)/var/rpm/SOURCES/ rpmbuild -ba $(PACKAGE_NAME).spec depend: $(MAKE) -C src depend docs: docs/drift.html docs/drift.ps .PHONY: docs depend rpm publish bin-dist # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: DrIFT-2.2.3/example/0000777000076400007640000000000010753607202011100 500000000000000DrIFT-2.2.3/example/README0000644000076400007640000000027210753606147011704 00000000000000Run derive over Example.hs to test everything is working properly. Compare with Example.output. Try some other example data structures in Artifical.hs BTree.hs Foo.lhs Xref.hs DrIFT-2.2.3/example/BTree.hs0000644000076400007640000000014310753606147012356 00000000000000import Binary data BTree k d = BTree Int [(k,[d])] [BTree k d] {-! derive : Binary !-} DrIFT-2.2.3/example/Example.hs0000644000076400007640000000233210753606147012752 00000000000000{- example script for derive -} module Example where import Foo {-! global : has,is !-} -- global to this module {-!for Data derive : update,Show,Read!-} -- stand alone comand syntax {-!for Foo derive : test, Show,Read !-} -- apply rules to imported type {-!for Maybe derive : test !-} -- apply rules to prelude type data Data = D { name :: Name, constraints :: [(Class,Var)], vars :: [Var], body :: [(Constructor,[(Name,Type)])], derive :: [Class], statement :: Statement} | FnType { name :: Name, constraints :: [(Class,Var)], fntype :: Type} | Fn { name :: Name, vars :: [Var]} | Directive {-!derive : test!-} -- abbreviated syntax {-!for Statement derive : Eq,Ord,Enum,Show,Read,Bounded !-} {-!for Type derive : Eq,Ord,Enum,Bounded,Read !-} data Statement = DataStmt | NewTypeStmt type Name = String type Var = String type Class = String type Constructor = String data Type = Arrow Type Type -- fn | Apply Type Type -- application | Var String -- variable | Con String -- constructor / type e.g. Int, Char | Tuple [Type] -- tuple | List Type -- list deriving Show data (Eq a) => G a b = F (a->b) b | H a a {-!derive: test !-} newtype Q = Q Int {-!derive:test!-} DrIFT-2.2.3/example/Foo.lhs0000644000076400007640000000006310753606147012255 00000000000000>module Foo where >data Foo = Foo | Bar | Bub DrIFT-2.2.3/example/Makefile.in0000644000076400007640000002013610753606417013072 00000000000000# Makefile.in generated by automake 1.10 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
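The example modules above (BTree.hs, Example.hs, Foo.lhs) show the two annotation styles DrIFT recognises: a standalone {-!for Type derive : Rule,... !-} comment that names the target type, and an abbreviated {-!derive : Rule !-} comment attached directly to a declaration. A minimal sketch of both styles in one module follows; the Colour and Pair types and the particular rule choices are purely illustrative and not part of the distribution, though the rule names (Eq, Ord, Show, Read) are among those exercised by Example.hs.

module Sketch where

-- abbreviated form: the annotation follows the declaration it applies to
data Colour = Red | Green | Blue
    {-!derive : Eq, Ord, Show !-}

-- standalone form: names the type explicitly, so it may appear anywhere in the module
data Pair a b = Pair a b

{-!for Pair derive : Show, Read !-}

When such a file is run through DrIFT (for instance via the %_check scripts in the example Makefile, which set DERIVEPATH and pass the file to src/DrIFT), the requested instances are emitted after a "Generated by DrIFT" marker, as TestTerm.out.correct later in the example directory illustrates.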
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : subdir = example DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/ac-macros/acincludepackage.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_CLEAN_FILES = depcomp = am__depfiles_maybe = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ HC = @HC@ HCFLAGS = @HCFLAGS@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SH = @SH@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ am__leading_dot = @am__leading_dot@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SRCDIR = $(top_srcdir)/example EXTRA_DIST = \ README\ TestTerm.hs TestTerm.out.correct\ Artifical.hs BTree.hs Example.hs Foo.lhs Xref.hs CLEANFILES = *_check *.out all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign --ignore-deps example/Makefile'; \ cd $(top_srcdir) && \ $(AUTOMAKE) --foreign --ignore-deps example/Makefile 
.PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ fi; \ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ else \ test -f $(distdir)/$$file \ || cp -p $$d/$$file $(distdir)/$$file \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-exec-am: install-html: install-html-am install-info: install-info-am install-man: install-pdf: install-pdf-am install-ps: install-ps-am installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic distclean \ distclean-generic distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am #TESTS = TestTerm_check %_check: Makefile (\ echo "#! /bin/sh"; \ echo "set -e"; \ echo "DERIVEPATH=$(SRCDIR) $(top_builddir)/src/DrIFT $(SRCDIR)/$*.hs | sed -e '/{-#/d' > $*.out"; \ echo "diff $*.out $(SRCDIR)/$*.out.correct"; \ echo "$(RM) $*.out"; \ ) > $@ chmod +x $@ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: DrIFT-2.2.3/example/TestTerm.hs0000644000076400007640000000020210753606147013120 00000000000000module TestTerm where import TermRep {-! global : Term !-} data SortA = SortA1 SortB | SortA2 data SortB = SortB Integer SortA DrIFT-2.2.3/example/TestTerm.out.correct0000644000076400007640000000146510753606147014771 00000000000000{- Generated by DrIFT (Automatic class derivations for Haskell) -} module TestTerm where import TermRep {-! global : Term !-} data SortA = SortA1 SortB | SortA2 data SortB = SortB Integer SortA {-* Generated by DrIFT : Look, but Don't Touch. *-} instance Term SortA where explode (x::SortA) = TermRep (toDyn x, f x, g x) where f (SortA1 aa) = [explode aa] f SortA2 = [] g (SortA1 _) xs = case TermRep.fArgs xs of [aa] -> toDyn ((SortA1 (TermRep.fDyn aa))::SortA) g SortA2 xs = case TermRep.fArgs xs of [] -> toDyn ((SortA2)::SortA) instance Term SortB where explode (x::SortB) = TermRep (toDyn x, f x, g x) where f (SortB aa ab) = [explode aa,explode ab] g (SortB _ _) xs = case TermRep.fArgs xs of [aa,ab] -> toDyn ((SortB (TermRep.fDyn aa) (TermRep.fDyn ab))::SortB) -- Imported from other files :- DrIFT-2.2.3/example/Makefile.am0000644000076400007640000000070310753606147013057 00000000000000SRCDIR = $(top_srcdir)/example EXTRA_DIST = \ README\ TestTerm.hs TestTerm.out.correct\ Artifical.hs BTree.hs Example.hs Foo.lhs Xref.hs #TESTS = TestTerm_check %_check: Makefile (\ echo "#! 
/bin/sh"; \ echo "set -e"; \ echo "DERIVEPATH=$(SRCDIR) $(top_builddir)/src/DrIFT $(SRCDIR)/$*.hs | sed -e '/{-#/d' > $*.out"; \ echo "diff $*.out $(SRCDIR)/$*.out.correct"; \ echo "$(RM) $*.out"; \ ) > $@ chmod +x $@ CLEANFILES = *_check *.out DrIFT-2.2.3/example/Artifical.hs0000644000076400007640000000033610753606147013257 00000000000000module Example where import Binary data MyType a = ConsA Int (U (Int,[a])) | ConsB String --a | Red | Blue Int String {-(MyType a)-} [Int] {-!derive : Binary !-} DrIFT-2.2.3/example/Xref.hs0000644000076400007640000000017510753606147012266 00000000000000import Binary data Index a = Empty | Branch (Index a) string [a] (Index a) {-!derive : Binary!-} DrIFT-2.2.3/ac-macros/0000777000076400007640000000000010753607202011312 500000000000000DrIFT-2.2.3/ac-macros/mdate-sh0000755000076400007640000001255310753606417012673 00000000000000#!/bin/sh # Get modification time of a file or directory and pretty-print it. scriptversion=2005-06-29.22 # Copyright (C) 1995, 1996, 1997, 2003, 2004, 2005 Free Software # Foundation, Inc. # written by Ulrich Drepper , June 1995 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . case $1 in '') echo "$0: No file. Try \`$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: mdate-sh [--help] [--version] FILE Pretty-print the modification time of FILE. Report bugs to . EOF exit $? ;; -v | --v*) echo "mdate-sh $scriptversion" exit $? ;; esac # Prevent date giving response in another language. LANG=C export LANG LC_ALL=C export LC_ALL LC_TIME=C export LC_TIME # GNU ls changes its time format in response to the TIME_STYLE # variable. Since we cannot assume `unset' works, revert this # variable to its documented default. if test "${TIME_STYLE+set}" = set; then TIME_STYLE=posix-long-iso export TIME_STYLE fi save_arg1=$1 # Find out how to get the extended ls output of a file or directory. if ls -L /dev/null 1>/dev/null 2>&1; then ls_command='ls -L -l -d' else ls_command='ls -l -d' fi # A `ls -l' line looks as follows on OS/2. # drwxrwx--- 0 Aug 11 2001 foo # This differs from Unix, which adds ownership information. # drwxrwx--- 2 root root 4096 Aug 11 2001 foo # # To find the date, we split the line on spaces and iterate on words # until we find a month. This cannot work with files whose owner is a # user named `Jan', or `Feb', etc. However, it's unlikely that `/' # will be owned by a user whose name is a month. So we first look at # the extended ls output of the root directory to decide how many # words should be skipped to get the date. 
# On HPUX /bin/sh, "set" interprets "-rw-r--r--" as options, so the "x" below. set x`ls -l -d /` # Find which argument is the month. month= command= until test $month do shift # Add another shift to the command. command="$command shift;" case $1 in Jan) month=January; nummonth=1;; Feb) month=February; nummonth=2;; Mar) month=March; nummonth=3;; Apr) month=April; nummonth=4;; May) month=May; nummonth=5;; Jun) month=June; nummonth=6;; Jul) month=July; nummonth=7;; Aug) month=August; nummonth=8;; Sep) month=September; nummonth=9;; Oct) month=October; nummonth=10;; Nov) month=November; nummonth=11;; Dec) month=December; nummonth=12;; esac done # Get the extended ls output of the file or directory. set dummy x`eval "$ls_command \"\$save_arg1\""` # Remove all preceding arguments eval $command # Because of the dummy argument above, month is in $2. # # On a POSIX system, we should have # # $# = 5 # $1 = file size # $2 = month # $3 = day # $4 = year or time # $5 = filename # # On Darwin 7.7.0 and 7.6.0, we have # # $# = 4 # $1 = day # $2 = month # $3 = year or time # $4 = filename # Get the month. case $2 in Jan) month=January; nummonth=1;; Feb) month=February; nummonth=2;; Mar) month=March; nummonth=3;; Apr) month=April; nummonth=4;; May) month=May; nummonth=5;; Jun) month=June; nummonth=6;; Jul) month=July; nummonth=7;; Aug) month=August; nummonth=8;; Sep) month=September; nummonth=9;; Oct) month=October; nummonth=10;; Nov) month=November; nummonth=11;; Dec) month=December; nummonth=12;; esac case $3 in ???*) day=$1;; *) day=$3; shift;; esac # Here we have to deal with the problem that the ls output gives either # the time of day or the year. case $3 in *:*) set `date`; eval year=\$$# case $2 in Jan) nummonthtod=1;; Feb) nummonthtod=2;; Mar) nummonthtod=3;; Apr) nummonthtod=4;; May) nummonthtod=5;; Jun) nummonthtod=6;; Jul) nummonthtod=7;; Aug) nummonthtod=8;; Sep) nummonthtod=9;; Oct) nummonthtod=10;; Nov) nummonthtod=11;; Dec) nummonthtod=12;; esac # For the first six month of the year the time notation can also # be used for files modified in the last year. if (expr $nummonth \> $nummonthtod) > /dev/null; then year=`expr $year - 1` fi;; *) year=$3;; esac # The result. echo $day $month $year # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-end: "$" # End: DrIFT-2.2.3/ac-macros/texinfo.tex0000644000076400007640000072311510753606417013444 00000000000000% texinfo.tex -- TeX macros to handle Texinfo files. % % Load plain if necessary, i.e., if running under initex. \expandafter\ifx\csname fmtname\endcsname\relax\input plain\fi % \def\texinfoversion{2006-10-04.17} % % Copyright (C) 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995, % 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free % Software Foundation, Inc. % % This texinfo.tex file is free software; you can redistribute it and/or % modify it under the terms of the GNU General Public License as % published by the Free Software Foundation; either version 2, or (at % your option) any later version. % % This texinfo.tex file is distributed in the hope that it will be % useful, but WITHOUT ANY WARRANTY; without even the implied warranty % of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU % General Public License for more details. % % You should have received a copy of the GNU General Public License % along with this texinfo.tex file; see the file COPYING. 
If not, write % to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, % Boston, MA 02110-1301, USA. % % As a special exception, when this file is read by TeX when processing % a Texinfo source document, you may use the result without % restriction. (This has been our intent since Texinfo was invented.) % % Please try the latest version of texinfo.tex before submitting bug % reports; you can get the latest version from: % http://www.gnu.org/software/texinfo/ (the Texinfo home page), or % ftp://tug.org/tex/texinfo.tex % (and all CTAN mirrors, see http://www.ctan.org). % The texinfo.tex in any given distribution could well be out % of date, so if that's what you're using, please check. % % Send bug reports to bug-texinfo@gnu.org. Please include including a % complete document in each bug report with which we can reproduce the % problem. Patches are, of course, greatly appreciated. % % To process a Texinfo manual with TeX, it's most reliable to use the % texi2dvi shell script that comes with the distribution. For a simple % manual foo.texi, however, you can get away with this: % tex foo.texi % texindex foo.?? % tex foo.texi % tex foo.texi % dvips foo.dvi -o # or whatever; this makes foo.ps. % The extra TeX runs get the cross-reference information correct. % Sometimes one run after texindex suffices, and sometimes you need more % than two; texi2dvi does it as many times as necessary. % % It is possible to adapt texinfo.tex for other languages, to some % extent. You can get the existing language-specific files from the % full Texinfo distribution. % % The GNU Texinfo home page is http://www.gnu.org/software/texinfo. \message{Loading texinfo [version \texinfoversion]:} % If in a .fmt file, print the version number % and turn on active characters that we couldn't do earlier because % they might have appeared in the input file name. \everyjob{\message{[Texinfo version \texinfoversion]}% \catcode`+=\active \catcode`\_=\active} \message{Basics,} \chardef\other=12 % We never want plain's \outer definition of \+ in Texinfo. % For @tex, we can use \tabalign. \let\+ = \relax % Save some plain tex macros whose names we will redefine. \let\ptexb=\b \let\ptexbullet=\bullet \let\ptexc=\c \let\ptexcomma=\, \let\ptexdot=\. \let\ptexdots=\dots \let\ptexend=\end \let\ptexequiv=\equiv \let\ptexexclam=\! \let\ptexfootnote=\footnote \let\ptexgtr=> \let\ptexhat=^ \let\ptexi=\i \let\ptexindent=\indent \let\ptexinsert=\insert \let\ptexlbrace=\{ \let\ptexless=< \let\ptexnewwrite\newwrite \let\ptexnoindent=\noindent \let\ptexplus=+ \let\ptexrbrace=\} \let\ptexslash=\/ \let\ptexstar=\* \let\ptext=\t % If this character appears in an error message or help string, it % starts a new line in the output. \newlinechar = `^^J % Use TeX 3.0's \inputlineno to get the line number, for better error % messages, but if we're using an old version of TeX, don't do anything. % \ifx\inputlineno\thisisundefined \let\linenumber = \empty % Pre-3.0. \else \def\linenumber{l.\the\inputlineno:\space} \fi % Set up fixed words for English if not already set. 
\ifx\putwordAppendix\undefined \gdef\putwordAppendix{Appendix}\fi \ifx\putwordChapter\undefined \gdef\putwordChapter{Chapter}\fi \ifx\putwordfile\undefined \gdef\putwordfile{file}\fi \ifx\putwordin\undefined \gdef\putwordin{in}\fi \ifx\putwordIndexIsEmpty\undefined \gdef\putwordIndexIsEmpty{(Index is empty)}\fi \ifx\putwordIndexNonexistent\undefined \gdef\putwordIndexNonexistent{(Index is nonexistent)}\fi \ifx\putwordInfo\undefined \gdef\putwordInfo{Info}\fi \ifx\putwordInstanceVariableof\undefined \gdef\putwordInstanceVariableof{Instance Variable of}\fi \ifx\putwordMethodon\undefined \gdef\putwordMethodon{Method on}\fi \ifx\putwordNoTitle\undefined \gdef\putwordNoTitle{No Title}\fi \ifx\putwordof\undefined \gdef\putwordof{of}\fi \ifx\putwordon\undefined \gdef\putwordon{on}\fi \ifx\putwordpage\undefined \gdef\putwordpage{page}\fi \ifx\putwordsection\undefined \gdef\putwordsection{section}\fi \ifx\putwordSection\undefined \gdef\putwordSection{Section}\fi \ifx\putwordsee\undefined \gdef\putwordsee{see}\fi \ifx\putwordSee\undefined \gdef\putwordSee{See}\fi \ifx\putwordShortTOC\undefined \gdef\putwordShortTOC{Short Contents}\fi \ifx\putwordTOC\undefined \gdef\putwordTOC{Table of Contents}\fi % \ifx\putwordMJan\undefined \gdef\putwordMJan{January}\fi \ifx\putwordMFeb\undefined \gdef\putwordMFeb{February}\fi \ifx\putwordMMar\undefined \gdef\putwordMMar{March}\fi \ifx\putwordMApr\undefined \gdef\putwordMApr{April}\fi \ifx\putwordMMay\undefined \gdef\putwordMMay{May}\fi \ifx\putwordMJun\undefined \gdef\putwordMJun{June}\fi \ifx\putwordMJul\undefined \gdef\putwordMJul{July}\fi \ifx\putwordMAug\undefined \gdef\putwordMAug{August}\fi \ifx\putwordMSep\undefined \gdef\putwordMSep{September}\fi \ifx\putwordMOct\undefined \gdef\putwordMOct{October}\fi \ifx\putwordMNov\undefined \gdef\putwordMNov{November}\fi \ifx\putwordMDec\undefined \gdef\putwordMDec{December}\fi % \ifx\putwordDefmac\undefined \gdef\putwordDefmac{Macro}\fi \ifx\putwordDefspec\undefined \gdef\putwordDefspec{Special Form}\fi \ifx\putwordDefvar\undefined \gdef\putwordDefvar{Variable}\fi \ifx\putwordDefopt\undefined \gdef\putwordDefopt{User Option}\fi \ifx\putwordDeffunc\undefined \gdef\putwordDeffunc{Function}\fi % Since the category of space is not known, we have to be careful. \chardef\spacecat = 10 \def\spaceisspace{\catcode`\ =\spacecat} % sometimes characters are active, so we need control sequences. \chardef\colonChar = `\: \chardef\commaChar = `\, \chardef\dashChar = `\- \chardef\dotChar = `\. \chardef\exclamChar= `\! \chardef\lquoteChar= `\` \chardef\questChar = `\? \chardef\rquoteChar= `\' \chardef\semiChar = `\; \chardef\underChar = `\_ % Ignore a token. % \def\gobble#1{} % The following is used inside several \edef's. \def\makecsname#1{\expandafter\noexpand\csname#1\endcsname} % Hyphenation fixes. \hyphenation{ Flor-i-da Ghost-script Ghost-view Mac-OS Post-Script ap-pen-dix bit-map bit-maps data-base data-bases eshell fall-ing half-way long-est man-u-script man-u-scripts mini-buf-fer mini-buf-fers over-view par-a-digm par-a-digms rath-er rec-tan-gu-lar ro-bot-ics se-vere-ly set-up spa-ces spell-ing spell-ings stand-alone strong-est time-stamp time-stamps which-ever white-space wide-spread wrap-around } % Margin to add to right of even pages, to left of odd pages. \newdimen\bindingoffset \newdimen\normaloffset \newdimen\pagewidth \newdimen\pageheight % For a final copy, take out the rectangles % that mark overfull boxes (in case you have decided % that the text looks ok even though it passes the margin). 
% \def\finalout{\overfullrule=0pt} % @| inserts a changebar to the left of the current line. It should % surround any changed text. This approach does *not* work if the % change spans more than two lines of output. To handle that, we would % have adopt a much more difficult approach (putting marks into the main % vertical list for the beginning and end of each change). % \def\|{% % \vadjust can only be used in horizontal mode. \leavevmode % % Append this vertical mode material after the current line in the output. \vadjust{% % We want to insert a rule with the height and depth of the current % leading; that is exactly what \strutbox is supposed to record. \vskip-\baselineskip % % \vadjust-items are inserted at the left edge of the type. So % the \llap here moves out into the left-hand margin. \llap{% % % For a thicker or thinner bar, change the `1pt'. \vrule height\baselineskip width1pt % % This is the space between the bar and the text. \hskip 12pt }% }% } % Sometimes it is convenient to have everything in the transcript file % and nothing on the terminal. We don't just call \tracingall here, % since that produces some useless output on the terminal. We also make % some effort to order the tracing commands to reduce output in the log % file; cf. trace.sty in LaTeX. % \def\gloggingall{\begingroup \globaldefs = 1 \loggingall \endgroup}% \def\loggingall{% \tracingstats2 \tracingpages1 \tracinglostchars2 % 2 gives us more in etex \tracingparagraphs1 \tracingoutput1 \tracingmacros2 \tracingrestores1 \showboxbreadth\maxdimen \showboxdepth\maxdimen \ifx\eTeXversion\undefined\else % etex gives us more logging \tracingscantokens1 \tracingifs1 \tracinggroups1 \tracingnesting2 \tracingassigns1 \fi \tracingcommands3 % 3 gives us more in etex \errorcontextlines16 }% % add check for \lastpenalty to plain's definitions. If the last thing % we did was a \nobreak, we don't want to insert more space. % \def\smallbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\smallskipamount \removelastskip\penalty-50\smallskip\fi\fi} \def\medbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\medskipamount \removelastskip\penalty-100\medskip\fi\fi} \def\bigbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\bigskipamount \removelastskip\penalty-200\bigskip\fi\fi} % For @cropmarks command. % Do @cropmarks to get crop marks. % \newif\ifcropmarks \let\cropmarks = \cropmarkstrue % % Dimensions to add cropmarks at corners. % Added by P. A. MacKay, 12 Nov. 1986 % \newdimen\outerhsize \newdimen\outervsize % set by the paper size routines \newdimen\cornerlong \cornerlong=1pc \newdimen\cornerthick \cornerthick=.3pt \newdimen\topandbottommargin \topandbottommargin=.75in % Main output routine. \chardef\PAGE = 255 \output = {\onepageout{\pagecontents\PAGE}} \newbox\headlinebox \newbox\footlinebox % \onepageout takes a vbox as an argument. Note that \pagecontents % does insertions, but you have to call it yourself. \def\onepageout#1{% \ifcropmarks \hoffset=0pt \else \hoffset=\normaloffset \fi % \ifodd\pageno \advance\hoffset by \bindingoffset \else \advance\hoffset by -\bindingoffset\fi % % Do this outside of the \shipout so @code etc. will be expanded in % the headline as they should be, not taken literally (outputting ''code). \setbox\headlinebox = \vbox{\let\hsize=\pagewidth \makeheadline}% \setbox\footlinebox = \vbox{\let\hsize=\pagewidth \makefootline}% % {% % Have to do this stuff outside the \shipout because we want it to % take effect in \write's, yet the group defined by the \vbox ends % before the \shipout runs. 
% \indexdummies % don't expand commands in the output. \normalturnoffactive % \ in index entries must not stay \, e.g., if % the page break happens to be in the middle of an example. % We don't want .vr (or whatever) entries like this: % \entry{{\tt \indexbackslash }acronym}{32}{\code {\acronym}} % "\acronym" won't work when it's read back in; % it needs to be % {\code {{\tt \backslashcurfont }acronym} \shipout\vbox{% % Do this early so pdf references go to the beginning of the page. \ifpdfmakepagedest \pdfdest name{\the\pageno} xyz\fi % \ifcropmarks \vbox to \outervsize\bgroup \hsize = \outerhsize \vskip-\topandbottommargin \vtop to0pt{% \line{\ewtop\hfil\ewtop}% \nointerlineskip \line{% \vbox{\moveleft\cornerthick\nstop}% \hfill \vbox{\moveright\cornerthick\nstop}% }% \vss}% \vskip\topandbottommargin \line\bgroup \hfil % center the page within the outer (page) hsize. \ifodd\pageno\hskip\bindingoffset\fi \vbox\bgroup \fi % \unvbox\headlinebox \pagebody{#1}% \ifdim\ht\footlinebox > 0pt % Only leave this space if the footline is nonempty. % (We lessened \vsize for it in \oddfootingyyy.) % The \baselineskip=24pt in plain's \makefootline has no effect. \vskip 24pt \unvbox\footlinebox \fi % \ifcropmarks \egroup % end of \vbox\bgroup \hfil\egroup % end of (centering) \line\bgroup \vskip\topandbottommargin plus1fill minus1fill \boxmaxdepth = \cornerthick \vbox to0pt{\vss \line{% \vbox{\moveleft\cornerthick\nsbot}% \hfill \vbox{\moveright\cornerthick\nsbot}% }% \nointerlineskip \line{\ewbot\hfil\ewbot}% }% \egroup % \vbox from first cropmarks clause \fi }% end of \shipout\vbox }% end of group with \indexdummies \advancepageno \ifnum\outputpenalty>-20000 \else\dosupereject\fi } \newinsert\margin \dimen\margin=\maxdimen \def\pagebody#1{\vbox to\pageheight{\boxmaxdepth=\maxdepth #1}} {\catcode`\@ =11 \gdef\pagecontents#1{\ifvoid\topins\else\unvbox\topins\fi % marginal hacks, juha@viisa.uucp (Juha Takala) \ifvoid\margin\else % marginal info is present \rlap{\kern\hsize\vbox to\z@{\kern1pt\box\margin \vss}}\fi \dimen@=\dp#1 \unvbox#1 \ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi \ifr@ggedbottom \kern-\dimen@ \vfil \fi} } % Here are the rules for the cropmarks. Note that they are % offset so that the space between them is truly \outerhsize or \outervsize % (P. A. MacKay, 12 November, 1986) % \def\ewtop{\vrule height\cornerthick depth0pt width\cornerlong} \def\nstop{\vbox {\hrule height\cornerthick depth\cornerlong width\cornerthick}} \def\ewbot{\vrule height0pt depth\cornerthick width\cornerlong} \def\nsbot{\vbox {\hrule height\cornerlong depth\cornerthick width\cornerthick}} % Parse an argument, then pass it to #1. The argument is the rest of % the input line (except we remove a trailing comment). #1 should be a % macro which expects an ordinary undelimited TeX argument. % \def\parsearg{\parseargusing{}} \def\parseargusing#1#2{% \def\argtorun{#2}% \begingroup \obeylines \spaceisspace #1% \parseargline\empty% Insert the \empty token, see \finishparsearg below. } {\obeylines % \gdef\parseargline#1^^M{% \endgroup % End of the group started in \parsearg. \argremovecomment #1\comment\ArgTerm% }% } % First remove any @comment, then any @c comment. \def\argremovecomment#1\comment#2\ArgTerm{\argremovec #1\c\ArgTerm} \def\argremovec#1\c#2\ArgTerm{\argcheckspaces#1\^^M\ArgTerm} % Each occurence of `\^^M' or `\^^M' is replaced by a single space. 
% % \argremovec might leave us with trailing space, e.g., % @end itemize @c foo % This space token undergoes the same procedure and is eventually removed % by \finishparsearg. % \def\argcheckspaces#1\^^M{\argcheckspacesX#1\^^M \^^M} \def\argcheckspacesX#1 \^^M{\argcheckspacesY#1\^^M} \def\argcheckspacesY#1\^^M#2\^^M#3\ArgTerm{% \def\temp{#3}% \ifx\temp\empty % Do not use \next, perhaps the caller of \parsearg uses it; reuse \temp: \let\temp\finishparsearg \else \let\temp\argcheckspaces \fi % Put the space token in: \temp#1 #3\ArgTerm } % If a _delimited_ argument is enclosed in braces, they get stripped; so % to get _exactly_ the rest of the line, we had to prevent such situation. % We prepended an \empty token at the very beginning and we expand it now, % just before passing the control to \argtorun. % (Similarily, we have to think about #3 of \argcheckspacesY above: it is % either the null string, or it ends with \^^M---thus there is no danger % that a pair of braces would be stripped. % % But first, we have to remove the trailing space token. % \def\finishparsearg#1 \ArgTerm{\expandafter\argtorun\expandafter{#1}} % \parseargdef\foo{...} % is roughly equivalent to % \def\foo{\parsearg\Xfoo} % \def\Xfoo#1{...} % % Actually, I use \csname\string\foo\endcsname, ie. \\foo, as it is my % favourite TeX trick. --kasal, 16nov03 \def\parseargdef#1{% \expandafter \doparseargdef \csname\string#1\endcsname #1% } \def\doparseargdef#1#2{% \def#2{\parsearg#1}% \def#1##1% } % Several utility definitions with active space: { \obeyspaces \gdef\obeyedspace{ } % Make each space character in the input produce a normal interword % space in the output. Don't allow a line break at this space, as this % is used only in environments like @example, where each line of input % should produce a line of output anyway. % \gdef\sepspaces{\obeyspaces\let =\tie} % If an index command is used in an @example environment, any spaces % therein should become regular spaces in the raw index file, not the % expansion of \tie (\leavevmode \penalty \@M \ ). \gdef\unsepspaces{\let =\space} } \def\flushcr{\ifx\par\lisppar \def\next##1{}\else \let\next=\relax \fi \next} % Define the framework for environments in texinfo.tex. It's used like this: % % \envdef\foo{...} % \def\Efoo{...} % % It's the responsibility of \envdef to insert \begingroup before the % actual body; @end closes the group after calling \Efoo. \envdef also % defines \thisenv, so the current environment is known; @end checks % whether the environment name matches. The \checkenv macro can also be % used to check whether the current environment is the one expected. % % Non-false conditionals (@iftex, @ifset) don't fit into this, so they % are not treated as enviroments; they don't open a group. (The % implementation of @end takes care not to call \endgroup in this % special case.) % At runtime, environments start with this: \def\startenvironment#1{\begingroup\def\thisenv{#1}} % initialize \let\thisenv\empty % ... 
but they get defined via ``\envdef\foo{...}'': \long\def\envdef#1#2{\def#1{\startenvironment#1#2}} \def\envparseargdef#1#2{\parseargdef#1{\startenvironment#1#2}} % Check whether we're in the right environment: \def\checkenv#1{% \def\temp{#1}% \ifx\thisenv\temp \else \badenverr \fi } % Evironment mismatch, #1 expected: \def\badenverr{% \errhelp = \EMsimple \errmessage{This command can appear only \inenvironment\temp, not \inenvironment\thisenv}% } \def\inenvironment#1{% \ifx#1\empty out of any environment% \else in environment \expandafter\string#1% \fi } % @end foo executes the definition of \Efoo. % But first, it executes a specialized version of \checkenv % \parseargdef\end{% \if 1\csname iscond.#1\endcsname \else % The general wording of \badenverr may not be ideal, but... --kasal, 06nov03 \expandafter\checkenv\csname#1\endcsname \csname E#1\endcsname \endgroup \fi } \newhelp\EMsimple{Press RETURN to continue.} %% Simple single-character @ commands % @@ prints an @ % Kludge this until the fonts are right (grr). \def\@{{\tt\char64}} % This is turned off because it was never documented % and you can use @w{...} around a quote to suppress ligatures. %% Define @` and @' to be the same as ` and ' %% but suppressing ligatures. %\def\`{{`}} %\def\'{{'}} % Used to generate quoted braces. \def\mylbrace {{\tt\char123}} \def\myrbrace {{\tt\char125}} \let\{=\mylbrace \let\}=\myrbrace \begingroup % Definitions to produce \{ and \} commands for indices, % and @{ and @} for the aux/toc files. \catcode`\{ = \other \catcode`\} = \other \catcode`\[ = 1 \catcode`\] = 2 \catcode`\! = 0 \catcode`\\ = \other !gdef!lbracecmd[\{]% !gdef!rbracecmd[\}]% !gdef!lbraceatcmd[@{]% !gdef!rbraceatcmd[@}]% !endgroup % @comma{} to avoid , parsing problems. \let\comma = , % Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent % Others are defined by plain TeX: @` @' @" @^ @~ @= @u @v @H. \let\, = \c \let\dotaccent = \. \def\ringaccent#1{{\accent23 #1}} \let\tieaccent = \t \let\ubaraccent = \b \let\udotaccent = \d % Other special characters: @questiondown @exclamdown @ordf @ordm % Plain TeX defines: @AA @AE @O @OE @L (plus lowercase versions) @ss. \def\questiondown{?`} \def\exclamdown{!`} \def\ordf{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{a}}} \def\ordm{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{o}}} % Dotless i and dotless j, used for accents. \def\imacro{i} \def\jmacro{j} \def\dotless#1{% \def\temp{#1}% \ifx\temp\imacro \ptexi \else\ifx\temp\jmacro \j \else \errmessage{@dotless can be used only with i or j}% \fi\fi } % The \TeX{} logo, as in plain, but resetting the spacing so that a % period following counts as ending a sentence. (Idea found in latex.) % \edef\TeX{\TeX \spacefactor=1000 } % @LaTeX{} logo. Not quite the same results as the definition in % latex.ltx, since we use a different font for the raised A; it's most % convenient for us to use an explicitly smaller font, rather than using % the \scriptstyle font (since we don't reset \scriptstyle and % \scriptscriptstyle). % \def\LaTeX{% L\kern-.36em {\setbox0=\hbox{T}% \vbox to \ht0{\hbox{\selectfonts\lllsize A}\vss}}% \kern-.15em \TeX } % Be sure we're in horizontal mode when doing a tie, since we make space % equivalent to this in @example-like environments. Otherwise, a space % at the beginning of a line will start with \penalty -- and % since \penalty is valid in vertical mode, we'd end up putting the % penalty on the vertical list instead of in the new paragraph. 
{\catcode`@ = 11 % Avoid using \@M directly, because that causes trouble % if the definition is written into an index file. \global\let\tiepenalty = \@M \gdef\tie{\leavevmode\penalty\tiepenalty\ } } % @: forces normal size whitespace following. \def\:{\spacefactor=1000 } % @* forces a line break. \def\*{\hfil\break\hbox{}\ignorespaces} % @/ allows a line break. \let\/=\allowbreak % @. is an end-of-sentence period. \def\.{.\spacefactor=\endofsentencespacefactor\space} % @! is an end-of-sentence bang. \def\!{!\spacefactor=\endofsentencespacefactor\space} % @? is an end-of-sentence query. \def\?{?\spacefactor=\endofsentencespacefactor\space} % @frenchspacing on|off says whether to put extra space after punctuation. % \def\onword{on} \def\offword{off} % \parseargdef\frenchspacing{% \def\temp{#1}% \ifx\temp\onword \plainfrenchspacing \else\ifx\temp\offword \plainnonfrenchspacing \else \errhelp = \EMsimple \errmessage{Unknown @frenchspacing option `\temp', must be on/off}% \fi\fi } % @w prevents a word break. Without the \leavevmode, @w at the % beginning of a paragraph, when TeX is still in vertical mode, would % produce a whole line of output instead of starting the paragraph. \def\w#1{\leavevmode\hbox{#1}} % @group ... @end group forces ... to be all on one page, by enclosing % it in a TeX vbox. We use \vtop instead of \vbox to construct the box % to keep its height that of a normal line. According to the rules for % \topskip (p.114 of the TeXbook), the glue inserted is % max (\topskip - \ht (first item), 0). If that height is large, % therefore, no glue is inserted, and the space between the headline and % the text is small, which looks bad. % % Another complication is that the group might be very large. This can % cause the glue on the previous page to be unduly stretched, because it % does not have much material. In this case, it's better to add an % explicit \vfill so that the extra space is at the bottom. The % threshold for doing this is if the group is more than \vfilllimit % percent of a page (\vfilllimit can be changed inside of @tex). % \newbox\groupbox \def\vfilllimit{0.7} % \envdef\group{% \ifnum\catcode`\^^M=\active \else \errhelp = \groupinvalidhelp \errmessage{@group invalid in context where filling is enabled}% \fi \startsavinginserts % \setbox\groupbox = \vtop\bgroup % Do @comment since we are called inside an environment such as % @example, where each end-of-line in the input causes an % end-of-line in the output. We don't want the end-of-line after % the `@group' to put extra space in the output. Since @group % should appear on a line by itself (according to the Texinfo % manual), we don't worry about eating any user text. \comment } % % The \vtop produces a box with normal height and large depth; thus, TeX puts % \baselineskip glue before it, and (when the next line of text is done) % \lineskip glue after it. Thus, space below is not quite equal to space % above. But it's pretty close. \def\Egroup{% % To get correct interline space between the last line of the group % and the first line afterwards, we have to propagate \prevdepth. \endgraf % Not \par, as it may have been set to \lisppar. \global\dimen1 = \prevdepth \egroup % End the \vtop. % \dimen0 is the vertical size of the group's box. \dimen0 = \ht\groupbox \advance\dimen0 by \dp\groupbox % \dimen2 is how much space is left on the page (more or less). \dimen2 = \pageheight \advance\dimen2 by -\pagetotal % if the group doesn't fit on the current page, and it's a big big % group, force a page break. 
\ifdim \dimen0 > \dimen2 \ifdim \pagetotal < \vfilllimit\pageheight \page \fi \fi \box\groupbox \prevdepth = \dimen1 \checkinserts } % % TeX puts in an \escapechar (i.e., `@') at the beginning of the help % message, so this ends up printing `@group can only ...'. % \newhelp\groupinvalidhelp{% group can only be used in environments such as @example,^^J% where each line of input produces a line of output.} % @need space-in-mils % forces a page break if there is not space-in-mils remaining. \newdimen\mil \mil=0.001in % Old definition--didn't work. %\parseargdef\need{\par % %% This method tries to make TeX break the page naturally %% if the depth of the box does not fit. %{\baselineskip=0pt% %\vtop to #1\mil{\vfil}\kern -#1\mil\nobreak %\prevdepth=-1000pt %}} \parseargdef\need{% % Ensure vertical mode, so we don't make a big box in the middle of a % paragraph. \par % % If the @need value is less than one line space, it's useless. \dimen0 = #1\mil \dimen2 = \ht\strutbox \advance\dimen2 by \dp\strutbox \ifdim\dimen0 > \dimen2 % % Do a \strut just to make the height of this box be normal, so the % normal leading is inserted relative to the preceding line. % And a page break here is fine. \vtop to #1\mil{\strut\vfil}% % % TeX does not even consider page breaks if a penalty added to the % main vertical list is 10000 or more. But in order to see if the % empty box we just added fits on the page, we must make it consider % page breaks. On the other hand, we don't want to actually break the % page after the empty box. So we use a penalty of 9999. % % There is an extremely small chance that TeX will actually break the % page at this \penalty, if there are no other feasible breakpoints in % sight. (If the user is using lots of big @group commands, which % almost-but-not-quite fill up a page, TeX will have a hard time doing % good page breaking, for example.) However, I could not construct an % example where a page broke at this \penalty; if it happens in a real % document, then we can reconsider our strategy. \penalty9999 % % Back up by the size of the box, whether we did a page break or not. \kern -#1\mil % % Do not allow a page break right after this kern. \nobreak \fi } % @br forces paragraph break (and is undocumented). \let\br = \par % @page forces the start of a new page. % \def\page{\par\vfill\supereject} % @exdent text.... % outputs text on separate line in roman font, starting at standard page margin % This records the amount of indent in the innermost environment. % That's how much \exdent should take out. \newskip\exdentamount % This defn is used inside fill environments such as @defun. \parseargdef\exdent{\hfil\break\hbox{\kern -\exdentamount{\rm#1}}\hfil\break} % This defn is used inside nofill environments such as @example. \parseargdef\nofillexdent{{\advance \leftskip by -\exdentamount \leftline{\hskip\leftskip{\rm#1}}}} % @inmargin{WHICH}{TEXT} puts TEXT in the WHICH margin next to the current % paragraph. For more general purposes, use the \margin insertion % class. WHICH is `l' or `r'. % \newskip\inmarginspacing \inmarginspacing=1cm \def\strutdepth{\dp\strutbox} % \def\doinmargin#1#2{\strut\vadjust{% \nobreak \kern-\strutdepth \vtop to \strutdepth{% \baselineskip=\strutdepth \vss % if you have multiple lines of stuff to put here, you'll need to % make the vbox yourself of the appropriate size. 
\ifx#1l% \llap{\ignorespaces #2\hskip\inmarginspacing}% \else \rlap{\hskip\hsize \hskip\inmarginspacing \ignorespaces #2}% \fi \null }% }} \def\inleftmargin{\doinmargin l} \def\inrightmargin{\doinmargin r} % % @inmargin{TEXT [, RIGHT-TEXT]} % (if RIGHT-TEXT is given, use TEXT for left page, RIGHT-TEXT for right; % else use TEXT for both). % \def\inmargin#1{\parseinmargin #1,,\finish} \def\parseinmargin#1,#2,#3\finish{% not perfect, but better than nothing. \setbox0 = \hbox{\ignorespaces #2}% \ifdim\wd0 > 0pt \def\lefttext{#1}% have both texts \def\righttext{#2}% \else \def\lefttext{#1}% have only one text \def\righttext{#1}% \fi % \ifodd\pageno \def\temp{\inrightmargin\righttext}% odd page -> outside is right margin \else \def\temp{\inleftmargin\lefttext}% \fi \temp } % @include file insert text of that file as input. % \def\include{\parseargusing\filenamecatcodes\includezzz} \def\includezzz#1{% \pushthisfilestack \def\thisfile{#1}% {% \makevalueexpandable \def\temp{\input #1 }% \expandafter }\temp \popthisfilestack } \def\filenamecatcodes{% \catcode`\\=\other \catcode`~=\other \catcode`^=\other \catcode`_=\other \catcode`|=\other \catcode`<=\other \catcode`>=\other \catcode`+=\other \catcode`-=\other } \def\pushthisfilestack{% \expandafter\pushthisfilestackX\popthisfilestack\StackTerm } \def\pushthisfilestackX{% \expandafter\pushthisfilestackY\thisfile\StackTerm } \def\pushthisfilestackY #1\StackTerm #2\StackTerm {% \gdef\popthisfilestack{\gdef\thisfile{#1}\gdef\popthisfilestack{#2}}% } \def\popthisfilestack{\errthisfilestackempty} \def\errthisfilestackempty{\errmessage{Internal error: the stack of filenames is empty.}} \def\thisfile{} % @center line % outputs that line, centered. % \parseargdef\center{% \ifhmode \let\next\centerH \else \let\next\centerV \fi \next{\hfil \ignorespaces#1\unskip \hfil}% } \def\centerH#1{% {% \hfil\break \advance\hsize by -\leftskip \advance\hsize by -\rightskip \line{#1}% \break }% } \def\centerV#1{\line{\kern\leftskip #1\kern\rightskip}} % @sp n outputs n lines of vertical space \parseargdef\sp{\vskip #1\baselineskip} % @comment ...line which is ignored... % @c is the same as @comment % @ignore ... @end ignore is another way to write a comment \def\comment{\begingroup \catcode`\^^M=\other% \catcode`\@=\other \catcode`\{=\other \catcode`\}=\other% \commentxxx} {\catcode`\^^M=\other \gdef\commentxxx#1^^M{\endgroup}} \let\c=\comment % @paragraphindent NCHARS % We'll use ems for NCHARS, close enough. % NCHARS can also be the word `asis' or `none'. % We cannot feasibly implement @paragraphindent asis, though. % \def\asisword{asis} % no translation, these are keywords \def\noneword{none} % \parseargdef\paragraphindent{% \def\temp{#1}% \ifx\temp\asisword \else \ifx\temp\noneword \defaultparindent = 0pt \else \defaultparindent = #1em \fi \fi \parindent = \defaultparindent } % @exampleindent NCHARS % We'll use ems for NCHARS like @paragraphindent. % It seems @exampleindent asis isn't necessary, but % I preserve it to make it similar to @paragraphindent. \parseargdef\exampleindent{% \def\temp{#1}% \ifx\temp\asisword \else \ifx\temp\noneword \lispnarrowing = 0pt \else \lispnarrowing = #1em \fi \fi } % @firstparagraphindent WORD % If WORD is `none', then suppress indentation of the first paragraph % after a section heading. If WORD is `insert', then do indent at such % paragraphs. % % The paragraph indentation is suppressed or not by calling % \suppressfirstparagraphindent, which the sectioning commands do. 
% We switch the definition of this back and forth according to WORD. % By default, we suppress indentation. % \def\suppressfirstparagraphindent{\dosuppressfirstparagraphindent} \def\insertword{insert} % \parseargdef\firstparagraphindent{% \def\temp{#1}% \ifx\temp\noneword \let\suppressfirstparagraphindent = \dosuppressfirstparagraphindent \else\ifx\temp\insertword \let\suppressfirstparagraphindent = \relax \else \errhelp = \EMsimple \errmessage{Unknown @firstparagraphindent option `\temp'}% \fi\fi } % Here is how we actually suppress indentation. Redefine \everypar to % \kern backwards by \parindent, and then reset itself to empty. % % We also make \indent itself not actually do anything until the next % paragraph. % \gdef\dosuppressfirstparagraphindent{% \gdef\indent{% \restorefirstparagraphindent \indent }% \gdef\noindent{% \restorefirstparagraphindent \noindent }% \global\everypar = {% \kern -\parindent \restorefirstparagraphindent }% } \gdef\restorefirstparagraphindent{% \global \let \indent = \ptexindent \global \let \noindent = \ptexnoindent \global \everypar = {}% } % @asis just yields its argument. Used with @table, for example. % \def\asis#1{#1} % @math outputs its argument in math mode. % % One complication: _ usually means subscripts, but it could also mean % an actual _ character, as in @math{@var{some_variable} + 1}. So make % _ active, and distinguish by seeing if the current family is \slfam, % which is what @var uses. { \catcode`\_ = \active \gdef\mathunderscore{% \catcode`\_=\active \def_{\ifnum\fam=\slfam \_\else\sb\fi}% } } % Another complication: we want \\ (and @\) to output a \ character. % FYI, plain.tex uses \\ as a temporary control sequence (why?), but % this is not advertised and we don't care. Texinfo does not % otherwise define @\. % % The \mathchar is class=0=ordinary, family=7=ttfam, position=5C=\. \def\mathbackslash{\ifnum\fam=\ttfam \mathchar"075C \else\backslash \fi} % \def\math{% \tex \mathunderscore \let\\ = \mathbackslash \mathactive $\finishmath } \def\finishmath#1{#1$\endgroup} % Close the group opened by \tex. % Some active characters (such as <) are spaced differently in math. % We have to reset their definitions in case the @math was an argument % to a command which sets the catcodes (such as @item or @section). % { \catcode`^ = \active \catcode`< = \active \catcode`> = \active \catcode`+ = \active \gdef\mathactive{% \let^ = \ptexhat \let< = \ptexless \let> = \ptexgtr \let+ = \ptexplus } } % @bullet and @minus need the same treatment as @math, just above. \def\bullet{$\ptexbullet$} \def\minus{$-$} % @dots{} outputs an ellipsis using the current font. % We do .5em per period so that it has the same spacing in the cm % typewriter fonts as three actual period characters; on the other hand, % in other typewriter fonts three periods are wider than 1.5em. So do % whichever is larger. % \def\dots{% \leavevmode \setbox0=\hbox{...}% get width of three periods \ifdim\wd0 > 1.5em \dimen0 = \wd0 \else \dimen0 = 1.5em \fi \hbox to \dimen0{% \hskip 0pt plus.25fil .\hskip 0pt plus1fil .\hskip 0pt plus1fil .\hskip 0pt plus.5fil }% } % @enddots{} is an end-of-sentence ellipsis. % \def\enddots{% \dots \spacefactor=\endofsentencespacefactor } % @comma{} is so commas can be inserted into text without messing up % Texinfo's parsing. % \let\comma = , % @refill is a no-op. \let\refill=\relax % If working on a large document in chapters, it is convenient to % be able to disable indexing, cross-referencing, and contents, for test runs. 
% This is done with @novalidate (before @setfilename). % \newif\iflinks \linkstrue % by default we want the aux files. \let\novalidate = \linksfalse % @setfilename is done at the beginning of every texinfo file. % So open here the files we need to have open while reading the input. % This makes it possible to make a .fmt file for texinfo. \def\setfilename{% \fixbackslash % Turn off hack to swallow `\input texinfo'. \iflinks \tryauxfile % Open the new aux file. TeX will close it automatically at exit. \immediate\openout\auxfile=\jobname.aux \fi % \openindices needs to do some work in any case. \openindices \let\setfilename=\comment % Ignore extra @setfilename cmds. % % If texinfo.cnf is present on the system, read it. % Useful for site-wide @afourpaper, etc. \openin 1 texinfo.cnf \ifeof 1 \else \input texinfo.cnf \fi \closein 1 % \comment % Ignore the actual filename. } % Called from \setfilename. % \def\openindices{% \newindex{cp}% \newcodeindex{fn}% \newcodeindex{vr}% \newcodeindex{tp}% \newcodeindex{ky}% \newcodeindex{pg}% } % @bye. \outer\def\bye{\pagealignmacro\tracingstats=1\ptexend} \message{pdf,} % adobe `portable' document format \newcount\tempnum \newcount\lnkcount \newtoks\filename \newcount\filenamelength \newcount\pgn \newtoks\toksA \newtoks\toksB \newtoks\toksC \newtoks\toksD \newbox\boxA \newcount\countA \newif\ifpdf \newif\ifpdfmakepagedest % when pdftex is run in dvi mode, \pdfoutput is defined (so \pdfoutput=1 % can be set). So we test for \relax and 0 as well as \undefined, % borrowed from ifpdf.sty. \ifx\pdfoutput\undefined \else \ifx\pdfoutput\relax \else \ifcase\pdfoutput \else \pdftrue \fi \fi \fi % PDF uses PostScript string constants for the names of xref targets, % for display in the outlines, and in other places. Thus, we have to % double any backslashes. Otherwise, a name like "\node" will be % interpreted as a newline (\n), followed by o, d, e. Not good. % http://www.ntg.nl/pipermail/ntg-pdftex/2004-July/000654.html % (and related messages, the final outcome is that it is up to the TeX % user to double the backslashes and otherwise make the string valid, so % that's what we do). % double active backslashes. % {\catcode`\@=0 \catcode`\\=\active @gdef@activebackslashdouble{% @catcode`@\=@active @let\=@doublebackslash} } % To handle parens, we must adopt a different approach, since parens are % not active characters. hyperref.dtx (which has the same problem as % us) handles it with this amazing macro to replace tokens. I've % tinkered with it a little for texinfo, but it's definitely from there. % % #1 is the tokens to replace. % #2 is the replacement. % #3 is the control sequence with the string. % \def\HyPsdSubst#1#2#3{% \def\HyPsdReplace##1#1##2\END{% ##1% \ifx\\##2\\% \else #2% \HyReturnAfterFi{% \HyPsdReplace##2\END }% \fi }% \xdef#3{\expandafter\HyPsdReplace#3#1\END}% } \long\def\HyReturnAfterFi#1\fi{\fi#1} % #1 is a control sequence in which to do the replacements. \def\backslashparens#1{% \xdef#1{#1}% redefine it as its expansion; the definition is simply % \lastnode when called from \setref -> \pdfmkdest. \HyPsdSubst{(}{\realbackslash(}{#1}% \HyPsdSubst{)}{\realbackslash)}{#1}% } \ifpdf \input pdfcolor \pdfcatalog{/PageMode /UseOutlines}% % #1 is image name, #2 width (might be empty/whitespace), #3 height (ditto). \def\dopdfimage#1#2#3{% \def\imagewidth{#2}\setbox0 = \hbox{\ignorespaces #2}% \def\imageheight{#3}\setbox2 = \hbox{\ignorespaces #3}% % without \immediate, pdftex seg faults when the same image is % included twice. 
(Version 3.14159-pre-1.0-unofficial-20010704.) \ifnum\pdftexversion < 14 \immediate\pdfimage \else \immediate\pdfximage \fi \ifdim \wd0 >0pt width \imagewidth \fi \ifdim \wd2 >0pt height \imageheight \fi \ifnum\pdftexversion<13 #1.pdf% \else {#1.pdf}% \fi \ifnum\pdftexversion < 14 \else \pdfrefximage \pdflastximage \fi} \def\pdfmkdest#1{{% % We have to set dummies so commands such as @code, and characters % such as \, aren't expanded when present in a section title. \atdummies \activebackslashdouble \def\pdfdestname{#1}% \backslashparens\pdfdestname \pdfdest name{\pdfdestname} xyz% }}% % % used to mark target names; must be expandable. \def\pdfmkpgn#1{#1}% % \let\linkcolor = \Blue % was Cyan, but that seems light? \def\endlink{\Black\pdfendlink} % Adding outlines to PDF; macros for calculating structure of outlines % come from Petr Olsak \def\expnumber#1{\expandafter\ifx\csname#1\endcsname\relax 0% \else \csname#1\endcsname \fi} \def\advancenumber#1{\tempnum=\expnumber{#1}\relax \advance\tempnum by 1 \expandafter\xdef\csname#1\endcsname{\the\tempnum}} % % #1 is the section text, which is what will be displayed in the % outline by the pdf viewer. #2 is the pdf expression for the number % of subentries (or empty, for subsubsections). #3 is the node text, % which might be empty if this toc entry had no corresponding node. % #4 is the page number % \def\dopdfoutline#1#2#3#4{% % Generate a link to the node text if that exists; else, use the % page number. We could generate a destination for the section % text in the case where a section has no node, but it doesn't % seem worth the trouble, since most documents are normally structured. \def\pdfoutlinedest{#3}% \ifx\pdfoutlinedest\empty \def\pdfoutlinedest{#4}% \else % Doubled backslashes in the name. {\activebackslashdouble \xdef\pdfoutlinedest{#3}% \backslashparens\pdfoutlinedest}% \fi % % Also double the backslashes in the display string. {\activebackslashdouble \xdef\pdfoutlinetext{#1}% \backslashparens\pdfoutlinetext}% % \pdfoutline goto name{\pdfmkpgn{\pdfoutlinedest}}#2{\pdfoutlinetext}% } % \def\pdfmakeoutlines{% \begingroup % Thanh's hack / proper braces in bookmarks \edef\mylbrace{\iftrue \string{\else}\fi}\let\{=\mylbrace \edef\myrbrace{\iffalse{\else\string}\fi}\let\}=\myrbrace % % Read toc silently, to get counts of subentries for \pdfoutline. \def\numchapentry##1##2##3##4{% \def\thischapnum{##2}% \def\thissecnum{0}% \def\thissubsecnum{0}% }% \def\numsecentry##1##2##3##4{% \advancenumber{chap\thischapnum}% \def\thissecnum{##2}% \def\thissubsecnum{0}% }% \def\numsubsecentry##1##2##3##4{% \advancenumber{sec\thissecnum}% \def\thissubsecnum{##2}% }% \def\numsubsubsecentry##1##2##3##4{% \advancenumber{subsec\thissubsecnum}% }% \def\thischapnum{0}% \def\thissecnum{0}% \def\thissubsecnum{0}% % % use \def rather than \let here because we redefine \chapentry et % al. a second time, below. \def\appentry{\numchapentry}% \def\appsecentry{\numsecentry}% \def\appsubsecentry{\numsubsecentry}% \def\appsubsubsecentry{\numsubsubsecentry}% \def\unnchapentry{\numchapentry}% \def\unnsecentry{\numsecentry}% \def\unnsubsecentry{\numsubsecentry}% \def\unnsubsubsecentry{\numsubsubsecentry}% \readdatafile{toc}% % % Read toc second time, this time actually producing the outlines. % The `-' means take the \expnumber as the absolute number of % subentries, which we calculated on our first read of the .toc above. % % We use the node names as the destinations. 
\def\numchapentry##1##2##3##4{% \dopdfoutline{##1}{count-\expnumber{chap##2}}{##3}{##4}}% \def\numsecentry##1##2##3##4{% \dopdfoutline{##1}{count-\expnumber{sec##2}}{##3}{##4}}% \def\numsubsecentry##1##2##3##4{% \dopdfoutline{##1}{count-\expnumber{subsec##2}}{##3}{##4}}% \def\numsubsubsecentry##1##2##3##4{% count is always zero \dopdfoutline{##1}{}{##3}{##4}}% % % PDF outlines are displayed using system fonts, instead of % document fonts. Therefore we cannot use special characters, % since the encoding is unknown. For example, the eogonek from % Latin 2 (0xea) gets translated to a | character. Info from % Staszek Wawrykiewicz, 19 Jan 2004 04:09:24 +0100. % % xx to do this right, we have to translate 8-bit characters to % their "best" equivalent, based on the @documentencoding. Right % now, I guess we'll just let the pdf reader have its way. \indexnofonts \setupdatafile \catcode`\\=\active \otherbackslash \input \jobname.toc \endgroup } % \def\skipspaces#1{\def\PP{#1}\def\D{|}% \ifx\PP\D\let\nextsp\relax \else\let\nextsp\skipspaces \ifx\p\space\else\addtokens{\filename}{\PP}% \advance\filenamelength by 1 \fi \fi \nextsp} \def\getfilename#1{\filenamelength=0\expandafter\skipspaces#1|\relax} \ifnum\pdftexversion < 14 \let \startlink \pdfannotlink \else \let \startlink \pdfstartlink \fi % make a live url in pdf output. \def\pdfurl#1{% \begingroup % it seems we really need yet another set of dummies; have not % tried to figure out what each command should do in the context % of @url. for now, just make @/ a no-op, that's the only one % people have actually reported a problem with. % \normalturnoffactive \def\@{@}% \let\/=\empty \makevalueexpandable \leavevmode\Red \startlink attr{/Border [0 0 0]}% user{/Subtype /Link /A << /S /URI /URI (#1) >>}% \endgroup} \def\pdfgettoks#1.{\setbox\boxA=\hbox{\toksA={#1.}\toksB={}\maketoks}} \def\addtokens#1#2{\edef\addtoks{\noexpand#1={\the#1#2}}\addtoks} \def\adn#1{\addtokens{\toksC}{#1}\global\countA=1\let\next=\maketoks} \def\poptoks#1#2|ENDTOKS|{\let\first=#1\toksD={#1}\toksA={#2}} \def\maketoks{% \expandafter\poptoks\the\toksA|ENDTOKS|\relax \ifx\first0\adn0 \else\ifx\first1\adn1 \else\ifx\first2\adn2 \else\ifx\first3\adn3 \else\ifx\first4\adn4 \else\ifx\first5\adn5 \else\ifx\first6\adn6 \else\ifx\first7\adn7 \else\ifx\first8\adn8 \else\ifx\first9\adn9 \else \ifnum0=\countA\else\makelink\fi \ifx\first.\let\next=\done\else \let\next=\maketoks \addtokens{\toksB}{\the\toksD} \ifx\first,\addtokens{\toksB}{\space}\fi \fi \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi \next} \def\makelink{\addtokens{\toksB}% {\noexpand\pdflink{\the\toksC}}\toksC={}\global\countA=0} \def\pdflink#1{% \startlink attr{/Border [0 0 0]} goto name{\pdfmkpgn{#1}} \linkcolor #1\endlink} \def\done{\edef\st{\global\noexpand\toksA={\the\toksB}}\st} \else \let\pdfmkdest = \gobble \let\pdfurl = \gobble \let\endlink = \relax \let\linkcolor = \relax \let\pdfmakeoutlines = \relax \fi % \ifx\pdfoutput \message{fonts,} % Change the current font style to #1, remembering it in \curfontstyle. % For now, we do not accumulate font styles: @b{@i{foo}} prints foo in % italics, not bold italics. % \def\setfontstyle#1{% \def\curfontstyle{#1}% not as a control sequence, because we are \edef'd. \csname ten#1\endcsname % change the current font } % Select #1 fonts with the current style. 
% \def\selectfonts#1{\csname #1fonts\endcsname \csname\curfontstyle\endcsname} \def\rm{\fam=0 \setfontstyle{rm}} \def\it{\fam=\itfam \setfontstyle{it}} \def\sl{\fam=\slfam \setfontstyle{sl}} \def\bf{\fam=\bffam \setfontstyle{bf}}\def\bfstylename{bf} \def\tt{\fam=\ttfam \setfontstyle{tt}} % Texinfo sort of supports the sans serif font style, which plain TeX does not. % So we set up a \sf. \newfam\sffam \def\sf{\fam=\sffam \setfontstyle{sf}} \let\li = \sf % Sometimes we call it \li, not \sf. % We don't need math for this font style. \def\ttsl{\setfontstyle{ttsl}} % Default leading. \newdimen\textleading \textleading = 13.2pt % Set the baselineskip to #1, and the lineskip and strut size % correspondingly. There is no deep meaning behind these magic numbers % used as factors; they just match (closely enough) what Knuth defined. % \def\lineskipfactor{.08333} \def\strutheightpercent{.70833} \def\strutdepthpercent {.29167} % \def\setleading#1{% \normalbaselineskip = #1\relax \normallineskip = \lineskipfactor\normalbaselineskip \normalbaselines \setbox\strutbox =\hbox{% \vrule width0pt height\strutheightpercent\baselineskip depth \strutdepthpercent \baselineskip }% } % Set the font macro #1 to the font named #2, adding on the % specified font prefix (normally `cm'). % #3 is the font's design size, #4 is a scale factor \def\setfont#1#2#3#4{\font#1=\fontprefix#2#3 scaled #4} % Use cm as the default font prefix. % To specify the font prefix, you must define \fontprefix % before you read in texinfo.tex. \ifx\fontprefix\undefined \def\fontprefix{cm} \fi % Support font families that don't use the same naming scheme as CM. \def\rmshape{r} \def\rmbshape{bx} %where the normal face is bold \def\bfshape{b} \def\bxshape{bx} \def\ttshape{tt} \def\ttbshape{tt} \def\ttslshape{sltt} \def\itshape{ti} \def\itbshape{bxti} \def\slshape{sl} \def\slbshape{bxsl} \def\sfshape{ss} \def\sfbshape{ss} \def\scshape{csc} \def\scbshape{csc} % Definitions for a main text size of 11pt. This is the default in % Texinfo. % \def\definetextfontsizexi{ % Text fonts (11.2pt, magstep1). \def\textnominalsize{11pt} \edef\mainmagstep{\magstephalf} \setfont\textrm\rmshape{10}{\mainmagstep} \setfont\texttt\ttshape{10}{\mainmagstep} \setfont\textbf\bfshape{10}{\mainmagstep} \setfont\textit\itshape{10}{\mainmagstep} \setfont\textsl\slshape{10}{\mainmagstep} \setfont\textsf\sfshape{10}{\mainmagstep} \setfont\textsc\scshape{10}{\mainmagstep} \setfont\textttsl\ttslshape{10}{\mainmagstep} \font\texti=cmmi10 scaled \mainmagstep \font\textsy=cmsy10 scaled \mainmagstep % A few fonts for @defun names and args. \setfont\defbf\bfshape{10}{\magstep1} \setfont\deftt\ttshape{10}{\magstep1} \setfont\defttsl\ttslshape{10}{\magstep1} \def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf} % Fonts for indices, footnotes, small examples (9pt). \def\smallnominalsize{9pt} \setfont\smallrm\rmshape{9}{1000} \setfont\smalltt\ttshape{9}{1000} \setfont\smallbf\bfshape{10}{900} \setfont\smallit\itshape{9}{1000} \setfont\smallsl\slshape{9}{1000} \setfont\smallsf\sfshape{9}{1000} \setfont\smallsc\scshape{10}{900} \setfont\smallttsl\ttslshape{10}{900} \font\smalli=cmmi9 \font\smallsy=cmsy9 % Fonts for small examples (8pt). 
\def\smallernominalsize{8pt} \setfont\smallerrm\rmshape{8}{1000} \setfont\smallertt\ttshape{8}{1000} \setfont\smallerbf\bfshape{10}{800} \setfont\smallerit\itshape{8}{1000} \setfont\smallersl\slshape{8}{1000} \setfont\smallersf\sfshape{8}{1000} \setfont\smallersc\scshape{10}{800} \setfont\smallerttsl\ttslshape{10}{800} \font\smalleri=cmmi8 \font\smallersy=cmsy8 % Fonts for title page (20.4pt): \def\titlenominalsize{20pt} \setfont\titlerm\rmbshape{12}{\magstep3} \setfont\titleit\itbshape{10}{\magstep4} \setfont\titlesl\slbshape{10}{\magstep4} \setfont\titlett\ttbshape{12}{\magstep3} \setfont\titlettsl\ttslshape{10}{\magstep4} \setfont\titlesf\sfbshape{17}{\magstep1} \let\titlebf=\titlerm \setfont\titlesc\scbshape{10}{\magstep4} \font\titlei=cmmi12 scaled \magstep3 \font\titlesy=cmsy10 scaled \magstep4 \def\authorrm{\secrm} \def\authortt{\sectt} % Chapter (and unnumbered) fonts (17.28pt). \def\chapnominalsize{17pt} \setfont\chaprm\rmbshape{12}{\magstep2} \setfont\chapit\itbshape{10}{\magstep3} \setfont\chapsl\slbshape{10}{\magstep3} \setfont\chaptt\ttbshape{12}{\magstep2} \setfont\chapttsl\ttslshape{10}{\magstep3} \setfont\chapsf\sfbshape{17}{1000} \let\chapbf=\chaprm \setfont\chapsc\scbshape{10}{\magstep3} \font\chapi=cmmi12 scaled \magstep2 \font\chapsy=cmsy10 scaled \magstep3 % Section fonts (14.4pt). \def\secnominalsize{14pt} \setfont\secrm\rmbshape{12}{\magstep1} \setfont\secit\itbshape{10}{\magstep2} \setfont\secsl\slbshape{10}{\magstep2} \setfont\sectt\ttbshape{12}{\magstep1} \setfont\secttsl\ttslshape{10}{\magstep2} \setfont\secsf\sfbshape{12}{\magstep1} \let\secbf\secrm \setfont\secsc\scbshape{10}{\magstep2} \font\seci=cmmi12 scaled \magstep1 \font\secsy=cmsy10 scaled \magstep2 % Subsection fonts (13.15pt). \def\ssecnominalsize{13pt} \setfont\ssecrm\rmbshape{12}{\magstephalf} \setfont\ssecit\itbshape{10}{1315} \setfont\ssecsl\slbshape{10}{1315} \setfont\ssectt\ttbshape{12}{\magstephalf} \setfont\ssecttsl\ttslshape{10}{1315} \setfont\ssecsf\sfbshape{12}{\magstephalf} \let\ssecbf\ssecrm \setfont\ssecsc\scbshape{10}{1315} \font\sseci=cmmi12 scaled \magstephalf \font\ssecsy=cmsy10 scaled 1315 % Reduced fonts for @acro in text (10pt). \def\reducednominalsize{10pt} \setfont\reducedrm\rmshape{10}{1000} \setfont\reducedtt\ttshape{10}{1000} \setfont\reducedbf\bfshape{10}{1000} \setfont\reducedit\itshape{10}{1000} \setfont\reducedsl\slshape{10}{1000} \setfont\reducedsf\sfshape{10}{1000} \setfont\reducedsc\scshape{10}{1000} \setfont\reducedttsl\ttslshape{10}{1000} \font\reducedi=cmmi10 \font\reducedsy=cmsy10 % reset the current fonts \textfonts \rm } % end of 11pt text font size definitions % Definitions to make the main text be 10pt Computer Modern, with % section, chapter, etc., sizes following suit. This is for the GNU % Press printing of the Emacs 22 manual. Maybe other manuals in the % future. Used with @smallbook, which sets the leading to 12pt. % \def\definetextfontsizex{% % Text fonts (10pt). \def\textnominalsize{10pt} \edef\mainmagstep{1000} \setfont\textrm\rmshape{10}{\mainmagstep} \setfont\texttt\ttshape{10}{\mainmagstep} \setfont\textbf\bfshape{10}{\mainmagstep} \setfont\textit\itshape{10}{\mainmagstep} \setfont\textsl\slshape{10}{\mainmagstep} \setfont\textsf\sfshape{10}{\mainmagstep} \setfont\textsc\scshape{10}{\mainmagstep} \setfont\textttsl\ttslshape{10}{\mainmagstep} \font\texti=cmmi10 scaled \mainmagstep \font\textsy=cmsy10 scaled \mainmagstep % A few fonts for @defun names and args. 
\setfont\defbf\bfshape{10}{\magstephalf} \setfont\deftt\ttshape{10}{\magstephalf} \setfont\defttsl\ttslshape{10}{\magstephalf} \def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf} % Fonts for indices, footnotes, small examples (9pt). \def\smallnominalsize{9pt} \setfont\smallrm\rmshape{9}{1000} \setfont\smalltt\ttshape{9}{1000} \setfont\smallbf\bfshape{10}{900} \setfont\smallit\itshape{9}{1000} \setfont\smallsl\slshape{9}{1000} \setfont\smallsf\sfshape{9}{1000} \setfont\smallsc\scshape{10}{900} \setfont\smallttsl\ttslshape{10}{900} \font\smalli=cmmi9 \font\smallsy=cmsy9 % Fonts for small examples (8pt). \def\smallernominalsize{8pt} \setfont\smallerrm\rmshape{8}{1000} \setfont\smallertt\ttshape{8}{1000} \setfont\smallerbf\bfshape{10}{800} \setfont\smallerit\itshape{8}{1000} \setfont\smallersl\slshape{8}{1000} \setfont\smallersf\sfshape{8}{1000} \setfont\smallersc\scshape{10}{800} \setfont\smallerttsl\ttslshape{10}{800} \font\smalleri=cmmi8 \font\smallersy=cmsy8 % Fonts for title page (20.4pt): \def\titlenominalsize{20pt} \setfont\titlerm\rmbshape{12}{\magstep3} \setfont\titleit\itbshape{10}{\magstep4} \setfont\titlesl\slbshape{10}{\magstep4} \setfont\titlett\ttbshape{12}{\magstep3} \setfont\titlettsl\ttslshape{10}{\magstep4} \setfont\titlesf\sfbshape{17}{\magstep1} \let\titlebf=\titlerm \setfont\titlesc\scbshape{10}{\magstep4} \font\titlei=cmmi12 scaled \magstep3 \font\titlesy=cmsy10 scaled \magstep4 \def\authorrm{\secrm} \def\authortt{\sectt} % Chapter fonts (14.4pt). \def\chapnominalsize{14pt} \setfont\chaprm\rmbshape{12}{\magstep1} \setfont\chapit\itbshape{10}{\magstep2} \setfont\chapsl\slbshape{10}{\magstep2} \setfont\chaptt\ttbshape{12}{\magstep1} \setfont\chapttsl\ttslshape{10}{\magstep2} \setfont\chapsf\sfbshape{12}{\magstep1} \let\chapbf\chaprm \setfont\chapsc\scbshape{10}{\magstep2} \font\chapi=cmmi12 scaled \magstep1 \font\chapsy=cmsy10 scaled \magstep2 % Section fonts (12pt). \def\secnominalsize{12pt} \setfont\secrm\rmbshape{12}{1000} \setfont\secit\itbshape{10}{\magstep1} \setfont\secsl\slbshape{10}{\magstep1} \setfont\sectt\ttbshape{12}{1000} \setfont\secttsl\ttslshape{10}{\magstep1} \setfont\secsf\sfbshape{12}{1000} \let\secbf\secrm \setfont\secsc\scbshape{10}{\magstep1} \font\seci=cmmi12 \font\secsy=cmsy10 scaled \magstep1 % Subsection fonts (10pt). \def\ssecnominalsize{10pt} \setfont\ssecrm\rmbshape{10}{1000} \setfont\ssecit\itbshape{10}{1000} \setfont\ssecsl\slbshape{10}{1000} \setfont\ssectt\ttbshape{10}{1000} \setfont\ssecttsl\ttslshape{10}{1000} \setfont\ssecsf\sfbshape{10}{1000} \let\ssecbf\ssecrm \setfont\ssecsc\scbshape{10}{1000} \font\sseci=cmmi10 \font\ssecsy=cmsy10 % Reduced fonts for @acro in text (9pt). \def\reducednominalsize{9pt} \setfont\reducedrm\rmshape{9}{1000} \setfont\reducedtt\ttshape{9}{1000} \setfont\reducedbf\bfshape{10}{900} \setfont\reducedit\itshape{9}{1000} \setfont\reducedsl\slshape{9}{1000} \setfont\reducedsf\sfshape{9}{1000} \setfont\reducedsc\scshape{10}{900} \setfont\reducedttsl\ttslshape{10}{900} \font\reducedi=cmmi9 \font\reducedsy=cmsy9 % reduce space between paragraphs \divide\parskip by 2 % reset the current fonts \textfonts \rm } % end of 10pt text font size definitions % We provide the user-level command % @fonttextsize 10 % (or 11) to redefine the text font size. pt is assumed. 
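% For example (illustrative only), a manual that wants the smaller text
% size might say
%   @fonttextsize 10
% early in its source; the argument must be the literal `10' or `11',
% anything else is rejected below.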
% \def\xword{10} \def\xiword{11} % \parseargdef\fonttextsize{% \def\textsizearg{#1}% \wlog{doing @fonttextsize \textsizearg}% % % Set \globaldefs so that documents can use this inside @tex, since % makeinfo 4.8 does not support it, but we need it nonetheless. % \begingroup \globaldefs=1 \ifx\textsizearg\xword \definetextfontsizex \else \ifx\textsizearg\xiword \definetextfontsizexi \else \errhelp=\EMsimple \errmessage{@fonttextsize only supports `10' or `11', not `\textsizearg'} \fi\fi \endgroup } % In order for the font changes to affect most math symbols and letters, % we have to define the \textfont of the standard families. Since % texinfo doesn't allow for producing subscripts and superscripts except % in the main text, we don't bother to reset \scriptfont and % \scriptscriptfont (which would also require loading a lot more fonts). % \def\resetmathfonts{% \textfont0=\tenrm \textfont1=\teni \textfont2=\tensy \textfont\itfam=\tenit \textfont\slfam=\tensl \textfont\bffam=\tenbf \textfont\ttfam=\tentt \textfont\sffam=\tensf } % The font-changing commands redefine the meanings of \tenSTYLE, instead % of just \STYLE. We do this because \STYLE needs to also set the % current \fam for math mode. Our \STYLE (e.g., \rm) commands hardwire % \tenSTYLE to set the current font. % % Each font-changing command also sets the names \lsize (one size lower) % and \lllsize (three sizes lower). These relative commands are used in % the LaTeX logo and acronyms. % % This all needs generalizing, badly. % \def\textfonts{% \let\tenrm=\textrm \let\tenit=\textit \let\tensl=\textsl \let\tenbf=\textbf \let\tentt=\texttt \let\smallcaps=\textsc \let\tensf=\textsf \let\teni=\texti \let\tensy=\textsy \let\tenttsl=\textttsl \def\curfontsize{text}% \def\lsize{reduced}\def\lllsize{smaller}% \resetmathfonts \setleading{\textleading}} \def\titlefonts{% \let\tenrm=\titlerm \let\tenit=\titleit \let\tensl=\titlesl \let\tenbf=\titlebf \let\tentt=\titlett \let\smallcaps=\titlesc \let\tensf=\titlesf \let\teni=\titlei \let\tensy=\titlesy \let\tenttsl=\titlettsl \def\curfontsize{title}% \def\lsize{chap}\def\lllsize{subsec}% \resetmathfonts \setleading{25pt}} \def\titlefont#1{{\titlefonts\rm #1}} \def\chapfonts{% \let\tenrm=\chaprm \let\tenit=\chapit \let\tensl=\chapsl \let\tenbf=\chapbf \let\tentt=\chaptt \let\smallcaps=\chapsc \let\tensf=\chapsf \let\teni=\chapi \let\tensy=\chapsy \let\tenttsl=\chapttsl \def\curfontsize{chap}% \def\lsize{sec}\def\lllsize{text}% \resetmathfonts \setleading{19pt}} \def\secfonts{% \let\tenrm=\secrm \let\tenit=\secit \let\tensl=\secsl \let\tenbf=\secbf \let\tentt=\sectt \let\smallcaps=\secsc \let\tensf=\secsf \let\teni=\seci \let\tensy=\secsy \let\tenttsl=\secttsl \def\curfontsize{sec}% \def\lsize{subsec}\def\lllsize{reduced}% \resetmathfonts \setleading{16pt}} \def\subsecfonts{% \let\tenrm=\ssecrm \let\tenit=\ssecit \let\tensl=\ssecsl \let\tenbf=\ssecbf \let\tentt=\ssectt \let\smallcaps=\ssecsc \let\tensf=\ssecsf \let\teni=\sseci \let\tensy=\ssecsy \let\tenttsl=\ssecttsl \def\curfontsize{ssec}% \def\lsize{text}\def\lllsize{small}% \resetmathfonts \setleading{15pt}} \let\subsubsecfonts = \subsecfonts \def\reducedfonts{% \let\tenrm=\reducedrm \let\tenit=\reducedit \let\tensl=\reducedsl \let\tenbf=\reducedbf \let\tentt=\reducedtt \let\reducedcaps=\reducedsc \let\tensf=\reducedsf \let\teni=\reducedi \let\tensy=\reducedsy \let\tenttsl=\reducedttsl \def\curfontsize{reduced}% \def\lsize{small}\def\lllsize{smaller}% \resetmathfonts \setleading{10.5pt}} \def\smallfonts{% \let\tenrm=\smallrm \let\tenit=\smallit 
\let\tensl=\smallsl \let\tenbf=\smallbf \let\tentt=\smalltt \let\smallcaps=\smallsc \let\tensf=\smallsf \let\teni=\smalli \let\tensy=\smallsy \let\tenttsl=\smallttsl \def\curfontsize{small}% \def\lsize{smaller}\def\lllsize{smaller}% \resetmathfonts \setleading{10.5pt}} \def\smallerfonts{% \let\tenrm=\smallerrm \let\tenit=\smallerit \let\tensl=\smallersl \let\tenbf=\smallerbf \let\tentt=\smallertt \let\smallcaps=\smallersc \let\tensf=\smallersf \let\teni=\smalleri \let\tensy=\smallersy \let\tenttsl=\smallerttsl \def\curfontsize{smaller}% \def\lsize{smaller}\def\lllsize{smaller}% \resetmathfonts \setleading{9.5pt}} % Set the fonts to use with the @small... environments. \let\smallexamplefonts = \smallfonts % About \smallexamplefonts. If we use \smallfonts (9pt), @smallexample % can fit this many characters: % 8.5x11=86 smallbook=72 a4=90 a5=69 % If we use \scriptfonts (8pt), then we can fit this many characters: % 8.5x11=90+ smallbook=80 a4=90+ a5=77 % For me, subjectively, the few extra characters that fit aren't worth % the additional smallness of 8pt. So I'm making the default 9pt. % % By the way, for comparison, here's what fits with @example (10pt): % 8.5x11=71 smallbook=60 a4=75 a5=58 % % I wish the USA used A4 paper. % --karl, 24jan03. % Set up the default fonts, so we can use them for creating boxes. % \definetextfontsizexi % Define these so they can be easily changed for other fonts. \def\angleleft{$\langle$} \def\angleright{$\rangle$} % Count depth in font-changes, for error checks \newcount\fontdepth \fontdepth=0 % Fonts for short table of contents. \setfont\shortcontrm\rmshape{12}{1000} \setfont\shortcontbf\bfshape{10}{\magstep1} % no cmb12 \setfont\shortcontsl\slshape{12}{1000} \setfont\shortconttt\ttshape{12}{1000} %% Add scribe-like font environments, plus @l for inline lisp (usually sans %% serif) and @ii for TeX italic % \smartitalic{ARG} outputs arg in italics, followed by an italic correction % unless the following character is such as not to need one. \def\smartitalicx{\ifx\next,\else\ifx\next-\else\ifx\next.\else \ptexslash\fi\fi\fi} \def\smartslanted#1{{\ifusingtt\ttsl\sl #1}\futurelet\next\smartitalicx} \def\smartitalic#1{{\ifusingtt\ttsl\it #1}\futurelet\next\smartitalicx} % like \smartslanted except unconditionally uses \ttsl. % @var is set to this for defun arguments. \def\ttslanted#1{{\ttsl #1}\futurelet\next\smartitalicx} % like \smartslanted except unconditionally use \sl. We never want % ttsl for book titles, do we? \def\cite#1{{\sl #1}\futurelet\next\smartitalicx} \let\i=\smartitalic \let\slanted=\smartslanted \let\var=\smartslanted \let\dfn=\smartslanted \let\emph=\smartitalic % @b, explicit bold. \def\b#1{{\bf #1}} \let\strong=\b % @sansserif, explicit sans. \def\sansserif#1{{\sf #1}} % We can't just use \exhyphenpenalty, because that only has effect at % the end of a paragraph. Restore normal hyphenation at the end of the % group within which \nohyphenation is presumably called. % \def\nohyphenation{\hyphenchar\font = -1 \aftergroup\restorehyphenation} \def\restorehyphenation{\hyphenchar\font = `- } % Set sfcode to normal for the chars that usually have another value. % Can't use plain's \frenchspacing because it uses the `\x notation, and % sometimes \x has an active definition that messes things up. % \catcode`@=11 \def\plainfrenchspacing{% \sfcode\dotChar =\@m \sfcode\questChar=\@m \sfcode\exclamChar=\@m \sfcode\colonChar=\@m \sfcode\semiChar =\@m \sfcode\commaChar =\@m \def\endofsentencespacefactor{1000}% for @. 
and friends } \def\plainnonfrenchspacing{% \sfcode`\.3000\sfcode`\?3000\sfcode`\!3000 \sfcode`\:2000\sfcode`\;1500\sfcode`\,1250 \def\endofsentencespacefactor{3000}% for @. and friends } \catcode`@=\other \def\endofsentencespacefactor{3000}% default \def\t#1{% {\tt \rawbackslash \plainfrenchspacing #1}% \null } \def\samp#1{`\tclose{#1}'\null} \setfont\keyrm\rmshape{8}{1000} \font\keysy=cmsy9 \def\key#1{{\keyrm\textfont2=\keysy \leavevmode\hbox{% \raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{% \vbox{\hrule\kern-0.4pt \hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}% \kern-0.4pt\hrule}% \kern-.06em\raise0.4pt\hbox{\angleright}}}} % The old definition, with no lozenge: %\def\key #1{{\ttsl \nohyphenation \uppercase{#1}}\null} \def\ctrl #1{{\tt \rawbackslash \hat}#1} % @file, @option are the same as @samp. \let\file=\samp \let\option=\samp % @code is a modification of @t, % which makes spaces the same size as normal in the surrounding text. \def\tclose#1{% {% % Change normal interword space to be same as for the current font. \spaceskip = \fontdimen2\font % % Switch to typewriter. \tt % % But `\ ' produces the large typewriter interword space. \def\ {{\spaceskip = 0pt{} }}% % % Turn off hyphenation. \nohyphenation % \rawbackslash \plainfrenchspacing #1% }% \null } % We *must* turn on hyphenation at `-' and `_' in @code. % Otherwise, it is too hard to avoid overfull hboxes % in the Emacs manual, the Library manual, etc. % Unfortunately, TeX uses one parameter (\hyphenchar) to control % both hyphenation at - and hyphenation within words. % We must therefore turn them both off (\tclose does that) % and arrange explicitly to hyphenate at a dash. % -- rms. { \catcode`\-=\active \catcode`\_=\active \catcode`\'=\active \catcode`\`=\active % \global\def\code{\begingroup \catcode\rquoteChar=\active \catcode\lquoteChar=\active \let'\codequoteright \let`\codequoteleft % \catcode\dashChar=\active \catcode\underChar=\active \ifallowcodebreaks \let-\codedash \let_\codeunder \else \let-\realdash \let_\realunder \fi \codex } } \def\realdash{-} \def\codedash{-\discretionary{}{}{}} \def\codeunder{% % this is all so @math{@code{var_name}+1} can work. In math mode, _ % is "active" (mathcode"8000) and \normalunderscore (or \char95, etc.) % will therefore expand the active definition of _, which is us % (inside @code that is), therefore an endless loop. \ifusingtt{\ifmmode \mathchar"075F % class 0=ordinary, family 7=ttfam, pos 0x5F=_. \else\normalunderscore \fi \discretionary{}{}{}}% {\_}% } \def\codex #1{\tclose{#1}\endgroup} % An additional complication: the above will allow breaks after, e.g., % each of the four underscores in __typeof__. This is undesirable in % some manuals, especially if they don't have long identifiers in % general. @allowcodebreaks provides a way to control this. % \newif\ifallowcodebreaks \allowcodebreakstrue \def\keywordtrue{true} \def\keywordfalse{false} \parseargdef\allowcodebreaks{% \def\txiarg{#1}% \ifx\txiarg\keywordtrue \allowcodebreakstrue \else\ifx\txiarg\keywordfalse \allowcodebreaksfalse \else \errhelp = \EMsimple \errmessage{Unknown @allowcodebreaks option `\txiarg'}% \fi\fi } % @kbd is like @code, except that if the argument is just one @key command, % then @kbd has no effect. % @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always), % `example' (@kbd uses ttsl only inside of @example and friends), % or `code' (@kbd uses normal tty font always). 
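% For example (an illustrative fragment, not part of any particular
% manual), a document with long identifiers might request
%   @allowcodebreaks false
%   @kbdinputstyle example
% so that names such as __typeof__ are never broken inside @code, and
% @kbd{C-x C-s} is set in the slanted typewriter font only inside
% @example and friends (and in the normal typewriter font elsewhere).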
\parseargdef\kbdinputstyle{% \def\txiarg{#1}% \ifx\txiarg\worddistinct \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}% \else\ifx\txiarg\wordexample \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}% \else\ifx\txiarg\wordcode \gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}% \else \errhelp = \EMsimple \errmessage{Unknown @kbdinputstyle option `\txiarg'}% \fi\fi\fi } \def\worddistinct{distinct} \def\wordexample{example} \def\wordcode{code} % Default is `distinct.' \kbdinputstyle distinct \def\xkey{\key} \def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}% \ifx\one\xkey\ifx\threex\three \key{#2}% \else{\tclose{\kbdfont\look}}\fi \else{\tclose{\kbdfont\look}}\fi} % For @indicateurl, @env, @command quotes seem unnecessary, so use \code. \let\indicateurl=\code \let\env=\code \let\command=\code % @uref (abbreviation for `urlref') takes an optional (comma-separated) % second argument specifying the text to display and an optional third % arg as text to display instead of (rather than in addition to) the url % itself. First (mandatory) arg is the url. Perhaps eventually put in % a hypertex \special here. % \def\uref#1{\douref #1,,,\finish} \def\douref#1,#2,#3,#4\finish{\begingroup \unsepspaces \pdfurl{#1}% \setbox0 = \hbox{\ignorespaces #3}% \ifdim\wd0 > 0pt \unhbox0 % third arg given, show only that \else \setbox0 = \hbox{\ignorespaces #2}% \ifdim\wd0 > 0pt \ifpdf \unhbox0 % PDF: 2nd arg given, show only it \else \unhbox0\ (\code{#1})% DVI: 2nd arg given, show both it and url \fi \else \code{#1}% only url given, so show it \fi \fi \endlink \endgroup} % @url synonym for @uref, since that's how everyone uses it. % \let\url=\uref % rms does not like angle brackets --karl, 17may97. % So now @email is just like @uref, unless we are pdf. % %\def\email#1{\angleleft{\tt #1}\angleright} \ifpdf \def\email#1{\doemail#1,,\finish} \def\doemail#1,#2,#3\finish{\begingroup \unsepspaces \pdfurl{mailto:#1}% \setbox0 = \hbox{\ignorespaces #2}% \ifdim\wd0>0pt\unhbox0\else\code{#1}\fi \endlink \endgroup} \else \let\email=\uref \fi % Check if we are currently using a typewriter font. Since all the % Computer Modern typewriter fonts have zero interword stretch (and % shrink), and it is reasonable to expect all typewriter fonts to have % this property, we can check that font parameter. % \def\ifmonospace{\ifdim\fontdimen3\font=0pt } % Typeset a dimension, e.g., `in' or `pt'. The only reason for the % argument is to make the input look right: @dmn{pt} instead of @dmn{}pt. % \def\dmn#1{\thinspace #1} \def\kbd#1{\def\look{#1}\expandafter\kbdfoo\look??\par} % @l was never documented to mean ``switch to the Lisp font'', % and it is not used as such in any manual I can find. We need it for % Polish suppressed-l. --karl, 22sep96. %\def\l#1{{\li #1}\null} % Explicit font changes: @r, @sc, undocumented @ii. \def\r#1{{\rm #1}} % roman font \def\sc#1{{\smallcaps#1}} % smallcaps font \def\ii#1{{\it #1}} % italic font % @acronym for "FBI", "NATO", and the like. % We print this one point size smaller, since it's intended for % all-uppercase. % \def\acronym#1{\doacronym #1,,\finish} \def\doacronym#1,#2,#3\finish{% {\selectfonts\lsize #1}% \def\temp{#2}% \ifx\temp\empty \else \space ({\unsepspaces \ignorespaces \temp \unskip})% \fi } % @abbr for "Comput. J." and the like. % No font change, but don't do end-of-sentence spacing. 
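% Some illustrative uses of the commands above (the particular names and
% url are just examples):
%   @uref{http://www.gnu.org/, the GNU web site}
%     (PDF output shows just the link text; DVI shows the text plus the url)
%   @acronym{GNU, GNU's Not Unix}   -- prints the name a point smaller,
%                                      followed by (GNU's Not Unix)
%   @abbr{Comput. J.}               -- no font change, no end-of-sentence space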
% \def\abbr#1{\doabbr #1,,\finish} \def\doabbr#1,#2,#3\finish{% {\plainfrenchspacing #1}% \def\temp{#2}% \ifx\temp\empty \else \space ({\unsepspaces \ignorespaces \temp \unskip})% \fi } % @pounds{} is a sterling sign, which Knuth put in the CM italic font. % \def\pounds{{\it\$}} % @euro{} comes from a separate font, depending on the current style. % We use the free feym* fonts from the eurosym package by Henrik % Theiling, which support regular, slanted, bold and bold slanted (and % "outlined" (blackboard board, sort of) versions, which we don't need). % It is available from http://www.ctan.org/tex-archive/fonts/eurosym. % % Although only regular is the truly official Euro symbol, we ignore % that. The Euro is designed to be slightly taller than the regular % font height. % % feymr - regular % feymo - slanted % feybr - bold % feybo - bold slanted % % There is no good (free) typewriter version, to my knowledge. % A feymr10 euro is ~7.3pt wide, while a normal cmtt10 char is ~5.25pt wide. % Hmm. % % Also doesn't work in math. Do we need to do math with euro symbols? % Hope not. % % \def\euro{{\eurofont e}} \def\eurofont{% % We set the font at each command, rather than predefining it in % \textfonts and the other font-switching commands, so that % installations which never need the symbol don't have to have the % font installed. % % There is only one designed size (nominal 10pt), so we always scale % that to the current nominal size. % % By the way, simply using "at 1em" works for cmr10 and the like, but % does not work for cmbx10 and other extended/shrunken fonts. % \def\eurosize{\csname\curfontsize nominalsize\endcsname}% % \ifx\curfontstyle\bfstylename % bold: \font\thiseurofont = \ifusingit{feybo10}{feybr10} at \eurosize \else % regular: \font\thiseurofont = \ifusingit{feymo10}{feymr10} at \eurosize \fi \thiseurofont } % @registeredsymbol - R in a circle. The font for the R should really % be smaller yet, but lllsize is the best we can do for now. % Adapted from the plain.tex definition of \copyright. % \def\registeredsymbol{% $^{{\ooalign{\hfil\raise.07ex\hbox{\selectfonts\lllsize R}% \hfil\crcr\Orb}}% }$% } % @textdegree - the normal degrees sign. % \def\textdegree{$^\circ$} % Laurent Siebenmann reports \Orb undefined with: % Textures 1.7.7 (preloaded format=plain 93.10.14) (68K) 16 APR 2004 02:38 % so we'll define it if necessary. % \ifx\Orb\undefined \def\Orb{\mathhexbox20D} \fi \message{page headings,} \newskip\titlepagetopglue \titlepagetopglue = 1.5in \newskip\titlepagebottomglue \titlepagebottomglue = 2pc % First the title page. Must do @settitle before @titlepage. \newif\ifseenauthor \newif\iffinishedtitlepage % Do an implicit @contents or @shortcontents after @end titlepage if the % user says @setcontentsaftertitlepage or @setshortcontentsaftertitlepage. % \newif\ifsetcontentsaftertitlepage \let\setcontentsaftertitlepage = \setcontentsaftertitlepagetrue \newif\ifsetshortcontentsaftertitlepage \let\setshortcontentsaftertitlepage = \setshortcontentsaftertitlepagetrue \parseargdef\shorttitlepage{\begingroup\hbox{}\vskip 1.5in \chaprm \centerline{#1}% \endgroup\page\hbox{}\page} \envdef\titlepage{% % Open one extra group, as we want to close it in the middle of \Etitlepage. \begingroup \parindent=0pt \textfonts % Leave some space at the very top of the page. \vglue\titlepagetopglue % No rule at page bottom unless we print one at the top with @title. \finishedtitlepagetrue % % Most title ``pages'' are actually two pages long, with space % at the top of the second. 
We don't want the ragged left on the second. \let\oldpage = \page \def\page{% \iffinishedtitlepage\else \finishtitlepage \fi \let\page = \oldpage \page \null }% } \def\Etitlepage{% \iffinishedtitlepage\else \finishtitlepage \fi % It is important to do the page break before ending the group, % because the headline and footline are only empty inside the group. % If we use the new definition of \page, we always get a blank page % after the title page, which we certainly don't want. \oldpage \endgroup % % Need this before the \...aftertitlepage checks so that if they are % in effect the toc pages will come out with page numbers. \HEADINGSon % % If they want short, they certainly want long too. \ifsetshortcontentsaftertitlepage \shortcontents \contents \global\let\shortcontents = \relax \global\let\contents = \relax \fi % \ifsetcontentsaftertitlepage \contents \global\let\contents = \relax \global\let\shortcontents = \relax \fi } \def\finishtitlepage{% \vskip4pt \hrule height 2pt width \hsize \vskip\titlepagebottomglue \finishedtitlepagetrue } %%% Macros to be used within @titlepage: \let\subtitlerm=\tenrm \def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines} \def\authorfont{\authorrm \normalbaselineskip = 16pt \normalbaselines \let\tt=\authortt} \parseargdef\title{% \checkenv\titlepage \leftline{\titlefonts\rm #1} % print a rule at the page bottom also. \finishedtitlepagefalse \vskip4pt \hrule height 4pt width \hsize \vskip4pt } \parseargdef\subtitle{% \checkenv\titlepage {\subtitlefont \rightline{#1}}% } % @author should come last, but may come many times. % It can also be used inside @quotation. % \parseargdef\author{% \def\temp{\quotation}% \ifx\thisenv\temp \def\quotationauthor{#1}% printed in \Equotation. \else \checkenv\titlepage \ifseenauthor\else \vskip 0pt plus 1filll \seenauthortrue \fi {\authorfont \leftline{#1}}% \fi } %%% Set up page headings and footings. \let\thispage=\folio \newtoks\evenheadline % headline on even pages \newtoks\oddheadline % headline on odd pages \newtoks\evenfootline % footline on even pages \newtoks\oddfootline % footline on odd pages % Now make TeX use those variables \headline={{\textfonts\rm \ifodd\pageno \the\oddheadline \else \the\evenheadline \fi}} \footline={{\textfonts\rm \ifodd\pageno \the\oddfootline \else \the\evenfootline \fi}\HEADINGShook} \let\HEADINGShook=\relax % Commands to set those variables. % For example, this is what @headings on does % @evenheading @thistitle|@thispage|@thischapter % @oddheading @thischapter|@thispage|@thistitle % @evenfooting @thisfile|| % @oddfooting ||@thisfile \def\evenheading{\parsearg\evenheadingxxx} \def\evenheadingxxx #1{\evenheadingyyy #1\|\|\|\|\finish} \def\evenheadingyyy #1\|#2\|#3\|#4\finish{% \global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}} \def\oddheading{\parsearg\oddheadingxxx} \def\oddheadingxxx #1{\oddheadingyyy #1\|\|\|\|\finish} \def\oddheadingyyy #1\|#2\|#3\|#4\finish{% \global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}} \parseargdef\everyheading{\oddheadingxxx{#1}\evenheadingxxx{#1}}% \def\evenfooting{\parsearg\evenfootingxxx} \def\evenfootingxxx #1{\evenfootingyyy #1\|\|\|\|\finish} \def\evenfootingyyy #1\|#2\|#3\|#4\finish{% \global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}} \def\oddfooting{\parsearg\oddfootingxxx} \def\oddfootingxxx #1{\oddfootingyyy #1\|\|\|\|\finish} \def\oddfootingyyy #1\|#2\|#3\|#4\finish{% \global\oddfootline = {\rlap{\centerline{#2}}\line{#1\hfil#3}}% % % Leave some space for the footline. 
Hopefully ok to assume % @evenfooting will not be used by itself. \global\advance\pageheight by -12pt \global\advance\vsize by -12pt } \parseargdef\everyfooting{\oddfootingxxx{#1}\evenfootingxxx{#1}} % @headings double turns headings on for double-sided printing. % @headings single turns headings on for single-sided printing. % @headings off turns them off. % @headings on same as @headings double, retained for compatibility. % @headings after turns on double-sided headings after this page. % @headings doubleafter turns on double-sided headings after this page. % @headings singleafter turns on single-sided headings after this page. % By default, they are off at the start of a document, % and turned `on' after @end titlepage. \def\headings #1 {\csname HEADINGS#1\endcsname} \def\HEADINGSoff{% \global\evenheadline={\hfil} \global\evenfootline={\hfil} \global\oddheadline={\hfil} \global\oddfootline={\hfil}} \HEADINGSoff % When we turn headings on, set the page number to 1. % For double-sided printing, put current file name in lower left corner, % chapter name on inside top of right hand pages, document % title on inside top of left hand pages, and page numbers on outside top % edge of all pages. \def\HEADINGSdouble{% \global\pageno=1 \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\folio\hfil\thistitle}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\let\contentsalignmacro = \chapoddpage } \let\contentsalignmacro = \chappager % For single-sided printing, chapter title goes across top left of page, % page number on top right. \def\HEADINGSsingle{% \global\pageno=1 \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\thischapter\hfil\folio}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\let\contentsalignmacro = \chappager } \def\HEADINGSon{\HEADINGSdouble} \def\HEADINGSafter{\let\HEADINGShook=\HEADINGSdoublex} \let\HEADINGSdoubleafter=\HEADINGSafter \def\HEADINGSdoublex{% \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\folio\hfil\thistitle}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\let\contentsalignmacro = \chapoddpage } \def\HEADINGSsingleafter{\let\HEADINGShook=\HEADINGSsinglex} \def\HEADINGSsinglex{% \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\thischapter\hfil\folio}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\let\contentsalignmacro = \chappager } % Subroutines used in generating headings % This produces Day Month Year style of output. % Only define if not already defined, in case a txi-??.tex file has set % up a different format (e.g., txi-cs.tex does this). \ifx\today\undefined \def\today{% \number\day\space \ifcase\month \or\putwordMJan\or\putwordMFeb\or\putwordMMar\or\putwordMApr \or\putwordMMay\or\putwordMJun\or\putwordMJul\or\putwordMAug \or\putwordMSep\or\putwordMOct\or\putwordMNov\or\putwordMDec \fi \space\number\year} \fi % @settitle line... specifies the title of the document, for headings. % It generates no output of its own. \def\thistitle{\putwordNoTitle} \def\settitle{\parsearg{\gdef\thistitle}} \message{tables,} % Tables -- @table, @ftable, @vtable, @item(x). % default indentation of table text \newdimen\tableindent \tableindent=.8in % default indentation of @itemize and @enumerate text \newdimen\itemindent \itemindent=.3in % margin between end of table item and start of table text. 
\newdimen\itemmargin \itemmargin=.1in % used internally for \itemindent minus \itemmargin \newdimen\itemmax % Note @table, @ftable, and @vtable define @item, @itemx, etc., with % these defs. % They also define \itemindex % to index the item name in whatever manner is desired (perhaps none). \newif\ifitemxneedsnegativevskip \def\itemxpar{\par\ifitemxneedsnegativevskip\nobreak\vskip-\parskip\nobreak\fi} \def\internalBitem{\smallbreak \parsearg\itemzzz} \def\internalBitemx{\itemxpar \parsearg\itemzzz} \def\itemzzz #1{\begingroup % \advance\hsize by -\rightskip \advance\hsize by -\tableindent \setbox0=\hbox{\itemindicate{#1}}% \itemindex{#1}% \nobreak % This prevents a break before @itemx. % % If the item text does not fit in the space we have, put it on a line % by itself, and do not allow a page break either before or after that % line. We do not start a paragraph here because then if the next % command is, e.g., @kindex, the whatsit would get put into the % horizontal list on a line by itself, resulting in extra blank space. \ifdim \wd0>\itemmax % % Make this a paragraph so we get the \parskip glue and wrapping, % but leave it ragged-right. \begingroup \advance\leftskip by-\tableindent \advance\hsize by\tableindent \advance\rightskip by0pt plus1fil \leavevmode\unhbox0\par \endgroup % % We're going to be starting a paragraph, but we don't want the % \parskip glue -- logically it's part of the @item we just started. \nobreak \vskip-\parskip % % Stop a page break at the \parskip glue coming up. However, if % what follows is an environment such as @example, there will be no % \parskip glue; then the negative vskip we just inserted would % cause the example and the item to crash together. So we use this % bizarre value of 10001 as a signal to \aboveenvbreak to insert % \parskip glue after all. Section titles are handled this way also. % \penalty 10001 \endgroup \itemxneedsnegativevskipfalse \else % The item text fits into the space. Start a paragraph, so that the % following text (if any) will end up on the same line. \noindent % Do this with kerns and \unhbox so that if there is a footnote in % the item text, it can migrate to the main vertical list and % eventually be printed. \nobreak\kern-\tableindent \dimen0 = \itemmax \advance\dimen0 by \itemmargin \advance\dimen0 by -\wd0 \unhbox0 \nobreak\kern\dimen0 \endgroup \itemxneedsnegativevskiptrue \fi } \def\item{\errmessage{@item while not in a list environment}} \def\itemx{\errmessage{@itemx while not in a list environment}} % @table, @ftable, @vtable. 
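% For example (a sketch; @ftable and @vtable work the same way but also
% enter each item in the function or variable index):
%   @table @code
%   @item alpha
%   @itemx beta
%   Description shared by alpha and beta.
%   @item gamma
%   Description of gamma.
%   @end table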
\envdef\table{% \let\itemindex\gobble \tablecheck{table}% } \envdef\ftable{% \def\itemindex ##1{\doind {fn}{\code{##1}}}% \tablecheck{ftable}% } \envdef\vtable{% \def\itemindex ##1{\doind {vr}{\code{##1}}}% \tablecheck{vtable}% } \def\tablecheck#1{% \ifnum \the\catcode`\^^M=\active \endgroup \errmessage{This command won't work in this context; perhaps the problem is that we are \inenvironment\thisenv}% \def\next{\doignore{#1}}% \else \let\next\tablex \fi \next } \def\tablex#1{% \def\itemindicate{#1}% \parsearg\tabley } \def\tabley#1{% {% \makevalueexpandable \edef\temp{\noexpand\tablez #1\space\space\space}% \expandafter }\temp \endtablez } \def\tablez #1 #2 #3 #4\endtablez{% \aboveenvbreak \ifnum 0#1>0 \advance \leftskip by #1\mil \fi \ifnum 0#2>0 \tableindent=#2\mil \fi \ifnum 0#3>0 \advance \rightskip by #3\mil \fi \itemmax=\tableindent \advance \itemmax by -\itemmargin \advance \leftskip by \tableindent \exdentamount=\tableindent \parindent = 0pt \parskip = \smallskipamount \ifdim \parskip=0pt \parskip=2pt \fi \let\item = \internalBitem \let\itemx = \internalBitemx } \def\Etable{\endgraf\afterenvbreak} \let\Eftable\Etable \let\Evtable\Etable \let\Eitemize\Etable \let\Eenumerate\Etable % This is the counter used by @enumerate, which is really @itemize \newcount \itemno \envdef\itemize{\parsearg\doitemize} \def\doitemize#1{% \aboveenvbreak \itemmax=\itemindent \advance\itemmax by -\itemmargin \advance\leftskip by \itemindent \exdentamount=\itemindent \parindent=0pt \parskip=\smallskipamount \ifdim\parskip=0pt \parskip=2pt \fi \def\itemcontents{#1}% % @itemize with no arg is equivalent to @itemize @bullet. \ifx\itemcontents\empty\def\itemcontents{\bullet}\fi \let\item=\itemizeitem } % Definition of @item while inside @itemize and @enumerate. % \def\itemizeitem{% \advance\itemno by 1 % for enumerations {\let\par=\endgraf \smallbreak}% reasonable place to break {% % If the document has an @itemize directly after a section title, a % \nobreak will be last on the list, and \sectionheading will have % done a \vskip-\parskip. In that case, we don't want to zero % parskip, or the item text will crash with the heading. On the % other hand, when there is normal text preceding the item (as there % usually is), we do want to zero parskip, or there would be too much % space. In that case, we won't have a \nobreak before. At least % that's the theory. \ifnum\lastpenalty<10000 \parskip=0in \fi \noindent \hbox to 0pt{\hss \itemcontents \kern\itemmargin}% \vadjust{\penalty 1200}}% not good to break after first line of item. \flushcr } % \splitoff TOKENS\endmark defines \first to be the first token in % TOKENS, and \rest to be the remainder. % \def\splitoff#1#2\endmark{\def\first{#1}\def\rest{#2}}% % Allow an optional argument of an uppercase letter, lowercase letter, % or number, to specify the first label in the enumerated list. No % argument is the same as `1'. % \envparseargdef\enumerate{\enumeratey #1 \endenumeratey} \def\enumeratey #1 #2\endenumeratey{% % If we were given no argument, pretend we were given `1'. \def\thearg{#1}% \ifx\thearg\empty \def\thearg{1}\fi % % Detect if the argument is a single token. If so, it might be a % letter. Otherwise, the only valid thing it can be is a number. % (We will always have one token, because of the test we just made. % This is a good thing, since \splitoff doesn't work given nothing at % all -- the first parameter is undelimited.) \expandafter\splitoff\thearg\endmark \ifx\rest\empty % Only one token in the argument. It could still be anything. 
% A ``lowercase letter'' is one whose \lccode is nonzero. % An ``uppercase letter'' is one whose \lccode is both nonzero, and % not equal to itself. % Otherwise, we assume it's a number. % % We need the \relax at the end of the \ifnum lines to stop TeX from % continuing to look for a . % \ifnum\lccode\expandafter`\thearg=0\relax \numericenumerate % a number (we hope) \else % It's a letter. \ifnum\lccode\expandafter`\thearg=\expandafter`\thearg\relax \lowercaseenumerate % lowercase letter \else \uppercaseenumerate % uppercase letter \fi \fi \else % Multiple tokens in the argument. We hope it's a number. \numericenumerate \fi } % An @enumerate whose labels are integers. The starting integer is % given in \thearg. % \def\numericenumerate{% \itemno = \thearg \startenumeration{\the\itemno}% } % The starting (lowercase) letter is in \thearg. \def\lowercaseenumerate{% \itemno = \expandafter`\thearg \startenumeration{% % Be sure we're not beyond the end of the alphabet. \ifnum\itemno=0 \errmessage{No more lowercase letters in @enumerate; get a bigger alphabet}% \fi \char\lccode\itemno }% } % The starting (uppercase) letter is in \thearg. \def\uppercaseenumerate{% \itemno = \expandafter`\thearg \startenumeration{% % Be sure we're not beyond the end of the alphabet. \ifnum\itemno=0 \errmessage{No more uppercase letters in @enumerate; get a bigger alphabet} \fi \char\uccode\itemno }% } % Call \doitemize, adding a period to the first argument and supplying the % common last two arguments. Also subtract one from the initial value in % \itemno, since @item increments \itemno. % \def\startenumeration#1{% \advance\itemno by -1 \doitemize{#1.}\flushcr } % @alphaenumerate and @capsenumerate are abbreviations for giving an arg % to @enumerate. % \def\alphaenumerate{\enumerate{a}} \def\capsenumerate{\enumerate{A}} \def\Ealphaenumerate{\Eenumerate} \def\Ecapsenumerate{\Eenumerate} % @multitable macros % Amy Hendrickson, 8/18/94, 3/6/96 % % @multitable ... @end multitable will make as many columns as desired. % Contents of each column will wrap at width given in preamble. Width % can be specified either with sample text given in a template line, % or in percent of \hsize, the current width of text on page. % Table can continue over pages but will only break between lines. % To make preamble: % % Either define widths of columns in terms of percent of \hsize: % @multitable @columnfractions .25 .3 .45 % @item ... % % Numbers following @columnfractions are the percent of the total % current hsize to be used for each column. You may use as many % columns as desired. % Or use a template: % @multitable {Column 1 template} {Column 2 template} {Column 3 template} % @item ... % using the widest term desired in each column. % Each new table line starts with @item, each subsequent new column % starts with @tab. Empty columns may be produced by supplying @tab's % with nothing between them for as many times as empty columns are needed, % ie, @tab@tab@tab will produce two empty columns. % @item, @tab do not need to be on their own lines, but it will not hurt % if they are. % Sample multitable: % @multitable {Column 1 template} {Column 2 template} {Column 3 template} % @item first col stuff @tab second col stuff @tab third col % @item % first col stuff % @tab % second col stuff % @tab % third col % @item first col stuff @tab second col stuff % @tab Many paragraphs of text may be used in any column. % % They will wrap at the width determined by the template. % @item@tab@tab This will be in third column. 
% @end multitable % Default dimensions may be reset by user. % @multitableparskip is vertical space between paragraphs in table. % @multitableparindent is paragraph indent in table. % @multitablecolmargin is horizontal space to be left between columns. % @multitablelinespace is space to leave between table items, baseline % to baseline. % 0pt means it depends on current normal line spacing. % \newskip\multitableparskip \newskip\multitableparindent \newdimen\multitablecolspace \newskip\multitablelinespace \multitableparskip=0pt \multitableparindent=6pt \multitablecolspace=12pt \multitablelinespace=0pt % Macros used to set up halign preamble: % \let\endsetuptable\relax \def\xendsetuptable{\endsetuptable} \let\columnfractions\relax \def\xcolumnfractions{\columnfractions} \newif\ifsetpercent % #1 is the @columnfraction, usually a decimal number like .5, but might % be just 1. We just use it, whatever it is. % \def\pickupwholefraction#1 {% \global\advance\colcount by 1 \expandafter\xdef\csname col\the\colcount\endcsname{#1\hsize}% \setuptable } \newcount\colcount \def\setuptable#1{% \def\firstarg{#1}% \ifx\firstarg\xendsetuptable \let\go = \relax \else \ifx\firstarg\xcolumnfractions \global\setpercenttrue \else \ifsetpercent \let\go\pickupwholefraction \else \global\advance\colcount by 1 \setbox0=\hbox{#1\unskip\space}% Add a normal word space as a % separator; typically that is always in the input, anyway. \expandafter\xdef\csname col\the\colcount\endcsname{\the\wd0}% \fi \fi \ifx\go\pickupwholefraction % Put the argument back for the \pickupwholefraction call, so % we'll always have a period there to be parsed. \def\go{\pickupwholefraction#1}% \else \let\go = \setuptable \fi% \fi \go } % multitable-only commands. % % @headitem starts a heading row, which we typeset in bold. % Assignments have to be global since we are inside the implicit group % of an alignment entry. Note that \everycr resets \everytab. \def\headitem{\checkenv\multitable \crcr \global\everytab={\bf}\the\everytab}% % % A \tab used to include \hskip1sp. But then the space in a template % line is not enough. That is bad. So let's go back to just `&' until % we encounter the problem it was intended to solve again. % --karl, nathan@acm.org, 20apr99. \def\tab{\checkenv\multitable &\the\everytab}% % @multitable ... @end multitable definitions: % \newtoks\everytab % insert after every tab. % \envdef\multitable{% \vskip\parskip \startsavinginserts % % @item within a multitable starts a normal row. % We use \def instead of \let so that if one of the multitable entries % contains an @itemize, we don't choke on the \item (seen as \crcr aka % \endtemplate) expanding \doitemize. \def\item{\crcr}% % \tolerance=9500 \hbadness=9500 \setmultitablespacing \parskip=\multitableparskip \parindent=\multitableparindent \overfullrule=0pt \global\colcount=0 % \everycr = {% \noalign{% \global\everytab={}% \global\colcount=0 % Reset the column counter. % Check for saved footnotes, etc. \checkinserts % Keeps underfull box messages off when table breaks over pages. %\filbreak % Maybe so, but it also creates really weird page breaks when the % table breaks over pages. Wouldn't \vfil be better? Wait until the % problem manifests itself, so it can be fixed for real --karl. }% }% % \parsearg\domultitable } \def\domultitable#1{% % To parse everything between @multitable and @item: \setuptable#1 \endsetuptable % % This preamble sets up a generic column definition, which will % be used as many times as user calls for columns. 
% \vtop will set a single line and will also let text wrap and % continue for many paragraphs if desired. \halign\bgroup &% \global\advance\colcount by 1 \multistrut \vtop{% % Use the current \colcount to find the correct column width: \hsize=\expandafter\csname col\the\colcount\endcsname % % In order to keep entries from bumping into each other % we will add a \leftskip of \multitablecolspace to all columns after % the first one. % % If a template has been used, we will add \multitablecolspace % to the width of each template entry. % % If the user has set preamble in terms of percent of \hsize we will % use that dimension as the width of the column, and the \leftskip % will keep entries from bumping into each other. Table will start at % left margin and final column will justify at right margin. % % Make sure we don't inherit \rightskip from the outer environment. \rightskip=0pt \ifnum\colcount=1 % The first column will be indented with the surrounding text. \advance\hsize by\leftskip \else \ifsetpercent \else % If user has not set preamble in terms of percent of \hsize % we will advance \hsize by \multitablecolspace. \advance\hsize by \multitablecolspace \fi % In either case we will make \leftskip=\multitablecolspace: \leftskip=\multitablecolspace \fi % Ignoring space at the beginning and end avoids an occasional spurious % blank line, when TeX decides to break the line at the space before the % box from the multistrut, so the strut ends up on a line by itself. % For example: % @multitable @columnfractions .11 .89 % @item @code{#} % @tab Legal holiday which is valid in major parts of the whole country. % Is automatically provided with highlighting sequences respectively % marking characters. \noindent\ignorespaces##\unskip\multistrut }\cr } \def\Emultitable{% \crcr \egroup % end the \halign \global\setpercentfalse } \def\setmultitablespacing{% \def\multistrut{\strut}% just use the standard line spacing % % Compute \multitablelinespace (if not defined by user) for use in % \multitableparskip calculation. We used define \multistrut based on % this, but (ironically) that caused the spacing to be off. % See bug-texinfo report from Werner Lemberg, 31 Oct 2004 12:52:20 +0100. \ifdim\multitablelinespace=0pt \setbox0=\vbox{X}\global\multitablelinespace=\the\baselineskip \global\advance\multitablelinespace by-\ht0 \fi %% Test to see if parskip is larger than space between lines of %% table. If not, do nothing. %% If so, set to same dimension as multitablelinespace. \ifdim\multitableparskip>\multitablelinespace \global\multitableparskip=\multitablelinespace \global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller %% than skip between lines in the table. \fi% \ifdim\multitableparskip=0pt \global\multitableparskip=\multitablelinespace \global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller %% than skip between lines in the table. \fi} \message{conditionals,} % @iftex, @ifnotdocbook, @ifnothtml, @ifnotinfo, @ifnotplaintext, % @ifnotxml always succeed. They currently do nothing; we don't % attempt to check whether the conditionals are properly nested. But we % have to remember that they are conditionals, so that @end doesn't % attempt to close an environment group. % \def\makecond#1{% \expandafter\let\csname #1\endcsname = \relax \expandafter\let\csname iscond.#1\endcsname = 1 } \makecond{iftex} \makecond{ifnotdocbook} \makecond{ifnothtml} \makecond{ifnotinfo} \makecond{ifnotplaintext} \makecond{ifnotxml} % Ignore @ignore, @ifhtml, @ifinfo, and the like. 
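% For example, a manual might contain (sketch only, not from this file):
%   @ifhtml
%   This text is only for the HTML output.
%   @end ifhtml
%   @ignore
%   Never appears in any output.
%   @end ignore
% When run through TeX, both blocks are skipped by the \doignore
% machinery defined below.
%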
% \def\direntry{\doignore{direntry}} \def\documentdescription{\doignore{documentdescription}} \def\docbook{\doignore{docbook}} \def\html{\doignore{html}} \def\ifdocbook{\doignore{ifdocbook}} \def\ifhtml{\doignore{ifhtml}} \def\ifinfo{\doignore{ifinfo}} \def\ifnottex{\doignore{ifnottex}} \def\ifplaintext{\doignore{ifplaintext}} \def\ifxml{\doignore{ifxml}} \def\ignore{\doignore{ignore}} \def\menu{\doignore{menu}} \def\xml{\doignore{xml}} % Ignore text until a line `@end #1', keeping track of nested conditionals. % % A count to remember the depth of nesting. \newcount\doignorecount \def\doignore#1{\begingroup % Scan in ``verbatim'' mode: \obeylines \catcode`\@ = \other \catcode`\{ = \other \catcode`\} = \other % % Make sure that spaces turn into tokens that match what \doignoretext wants. \spaceisspace % % Count number of #1's that we've seen. \doignorecount = 0 % % Swallow text until we reach the matching `@end #1'. \dodoignore{#1}% } { \catcode`_=11 % We want to use \_STOP_ which cannot appear in texinfo source. \obeylines % % \gdef\dodoignore#1{% % #1 contains the command name as a string, e.g., `ifinfo'. % % Define a command to find the next `@end #1'. \long\def\doignoretext##1^^M@end #1{% \doignoretextyyy##1^^M@#1\_STOP_}% % % And this command to find another #1 command, at the beginning of a % line. (Otherwise, we would consider a line `@c @ifset', for % example, to count as an @ifset for nesting.) \long\def\doignoretextyyy##1^^M@#1##2\_STOP_{\doignoreyyy{##2}\_STOP_}% % % And now expand that command. \doignoretext ^^M% }% } \def\doignoreyyy#1{% \def\temp{#1}% \ifx\temp\empty % Nothing found. \let\next\doignoretextzzz \else % Found a nested condition, ... \advance\doignorecount by 1 \let\next\doignoretextyyy % ..., look for another. % If we're here, #1 ends with ^^M\ifinfo (for example). \fi \next #1% the token \_STOP_ is present just after this macro. } % We have to swallow the remaining "\_STOP_". % \def\doignoretextzzz#1{% \ifnum\doignorecount = 0 % We have just found the outermost @end. \let\next\enddoignore \else % Still inside a nested condition. \advance\doignorecount by -1 \let\next\doignoretext % Look for the next @end. \fi \next } % Finish off ignored text. { \obeylines% % Ignore anything after the last `@end #1'; this matters in verbatim % environments, where otherwise the newline after an ignored conditional % would result in a blank line in the output. \gdef\enddoignore#1^^M{\endgroup\ignorespaces}% } % @set VAR sets the variable VAR to an empty value. % @set VAR REST-OF-LINE sets VAR to the value REST-OF-LINE. % % Since we want to separate VAR from REST-OF-LINE (which might be % empty), we can't just use \parsearg; we have to insert a space of our % own to delimit the rest of the line, and then take it out again if we % didn't need it. % We rely on the fact that \parsearg sets \catcode`\ =10. % \parseargdef\set{\setyyy#1 \endsetyyy} \def\setyyy#1 #2\endsetyyy{% {% \makevalueexpandable \def\temp{#2}% \edef\next{\gdef\makecsname{SET#1}}% \ifx\temp\empty \next{}% \else \setzzz#2\endsetzzz \fi }% } % Remove the trailing space \setxxx inserted. \def\setzzz#1 \endsetzzz{\next{#1}} % @clear VAR clears (i.e., unsets) the variable VAR. % \parseargdef\clear{% {% \makevalueexpandable \global\expandafter\let\csname SET#1\endcsname=\relax }% } % @value{foo} gets the text saved in variable foo. 
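% A typical use in a manual (sketch; VERSION is just an example name
% and value):
%   @set VERSION 2.2.3
%   This is edition @value{VERSION} of the manual.
%   @clear VERSION
% After the @clear, a later @value{VERSION} is reported as unset (see
% \expandablevalue below).
%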
\def\value{\begingroup\makevalueexpandable\valuexxx} \def\valuexxx#1{\expandablevalue{#1}\endgroup} { \catcode`\- = \active \catcode`\_ = \active % \gdef\makevalueexpandable{% \let\value = \expandablevalue % We don't want these characters active, ... \catcode`\-=\other \catcode`\_=\other % ..., but we might end up with active ones in the argument if % we're called from @code, as @code{@value{foo-bar_}}, though. % So \let them to their normal equivalents. \let-\realdash \let_\normalunderscore } } % We have this subroutine so that we can handle at least some @value's % properly in indexes (we call \makevalueexpandable in \indexdummies). % The command has to be fully expandable (if the variable is set), since % the result winds up in the index file. This means that if the % variable's value contains other Texinfo commands, it's almost certain % it will fail (although perhaps we could fix that with sufficient work % to do a one-level expansion on the result, instead of complete). % \def\expandablevalue#1{% \expandafter\ifx\csname SET#1\endcsname\relax {[No value for ``#1'']}% \message{Variable `#1', used in @value, is not set.}% \else \csname SET#1\endcsname \fi } % @ifset VAR ... @end ifset reads the `...' iff VAR has been defined % with @set. % % To get special treatment of `@end ifset,' call \makeond and the redefine. % \makecond{ifset} \def\ifset{\parsearg{\doifset{\let\next=\ifsetfail}}} \def\doifset#1#2{% {% \makevalueexpandable \let\next=\empty \expandafter\ifx\csname SET#2\endcsname\relax #1% If not set, redefine \next. \fi \expandafter }\next } \def\ifsetfail{\doignore{ifset}} % @ifclear VAR ... @end ifclear reads the `...' iff VAR has never been % defined with @set, or has been undefined with @clear. % % The `\else' inside the `\doifset' parameter is a trick to reuse the % above code: if the variable is not set, do nothing, if it is set, % then redefine \next to \ifclearfail. % \makecond{ifclear} \def\ifclear{\parsearg{\doifset{\else \let\next=\ifclearfail}}} \def\ifclearfail{\doignore{ifclear}} % @dircategory CATEGORY -- specify a category of the dir file % which this file should belong to. Ignore this in TeX. \let\dircategory=\comment % @defininfoenclose. \let\definfoenclose=\comment \message{indexing,} % Index generation facilities % Define \newwrite to be identical to plain tex's \newwrite % except not \outer, so it can be used within macros and \if's. \edef\newwrite{\makecsname{ptexnewwrite}} % \newindex {foo} defines an index named foo. % It automatically defines \fooindex such that % \fooindex ...rest of line... puts an entry in the index foo. % It also defines \fooindfile to be the number of the output channel for % the file that accumulates this index. The file's extension is foo. % The name of an index should be no more than 2 characters long % for the sake of vms. % \def\newindex#1{% \iflinks \expandafter\newwrite \csname#1indfile\endcsname \openout \csname#1indfile\endcsname \jobname.#1 % Open the file \fi \expandafter\xdef\csname#1index\endcsname{% % Define @#1index \noexpand\doindex{#1}} } % @defindex foo == \newindex{foo} % \def\defindex{\parsearg\newindex} % Define @defcodeindex, like @defindex except put all entries in @code. % \def\defcodeindex{\parsearg\newcodeindex} % \def\newcodeindex#1{% \iflinks \expandafter\newwrite \csname#1indfile\endcsname \openout \csname#1indfile\endcsname \jobname.#1 \fi \expandafter\xdef\csname#1index\endcsname{% \noexpand\docodeindex{#1}}% } % @synindex foo bar makes index foo feed into index bar. 
% Do this instead of @defindex foo if you don't want it as a separate index. % % @syncodeindex foo bar similar, but put all entries made for index foo % inside @code. % \def\synindex#1 #2 {\dosynindex\doindex{#1}{#2}} \def\syncodeindex#1 #2 {\dosynindex\docodeindex{#1}{#2}} % #1 is \doindex or \docodeindex, #2 the index getting redefined (foo), % #3 the target index (bar). \def\dosynindex#1#2#3{% % Only do \closeout if we haven't already done it, else we'll end up % closing the target index. \expandafter \ifx\csname donesynindex#2\endcsname \undefined % The \closeout helps reduce unnecessary open files; the limit on the % Acorn RISC OS is a mere 16 files. \expandafter\closeout\csname#2indfile\endcsname \expandafter\let\csname\donesynindex#2\endcsname = 1 \fi % redefine \fooindfile: \expandafter\let\expandafter\temp\expandafter=\csname#3indfile\endcsname \expandafter\let\csname#2indfile\endcsname=\temp % redefine \fooindex: \expandafter\xdef\csname#2index\endcsname{\noexpand#1{#3}}% } % Define \doindex, the driver for all \fooindex macros. % Argument #1 is generated by the calling \fooindex macro, % and it is "foo", the name of the index. % \doindex just uses \parsearg; it calls \doind for the actual work. % This is because \doind is more useful to call from other macros. % There is also \dosubind {index}{topic}{subtopic} % which makes an entry in a two-level index such as the operation index. \def\doindex#1{\edef\indexname{#1}\parsearg\singleindexer} \def\singleindexer #1{\doind{\indexname}{#1}} % like the previous two, but they put @code around the argument. \def\docodeindex#1{\edef\indexname{#1}\parsearg\singlecodeindexer} \def\singlecodeindexer #1{\doind{\indexname}{\code{#1}}} % Take care of Texinfo commands that can appear in an index entry. % Since there are some commands we want to expand, and others we don't, % we have to laboriously prevent expansion for those that we don't. % \def\indexdummies{% \escapechar = `\\ % use backslash in output files. \def\@{@}% change to @@ when we switch to @ as escape char in index files. \def\ {\realbackslash\space }% % % Need these in case \tex is in effect and \{ is a \delimiter again. % But can't use \lbracecmd and \rbracecmd because texindex assumes % braces and backslashes are used only as delimiters. \let\{ = \mylbrace \let\} = \myrbrace % % I don't entirely understand this, but when an index entry is % generated from a macro call, the \endinput which \scanmacro inserts % causes processing to be prematurely terminated. This is, % apparently, because \indexsorttmp is fully expanded, and \endinput % is an expandable command. The redefinition below makes \endinput % disappear altogether for that purpose -- although logging shows that % processing continues to some further point. On the other hand, it % seems \endinput does not hurt in the printed index arg, since that % is still getting written without apparent harm. % % Sample source (mac-idx3.tex, reported by Graham Percival to % help-texinfo, 22may06): % @macro funindex {WORD} % @findex xyz % @end macro % ... % @funindex commtest % % The above is not enough to reproduce the bug, but it gives the flavor. % % Sample whatsit resulting: % .@write3{\entry{xyz}{@folio }{@code {xyz@endinput }}} % % So: \let\endinput = \empty % % Do the redefinitions. \commondummies } % For the aux and toc files, @ is the escape character. So we want to % redefine everything using @ as the escape character (instead of % \realbackslash, still used for index files). When everything uses @, % this will be simpler. 
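% As an example of the index commands defined above (sketch only; the
% index name `op' is illustrative):
%   @defindex op
%   @syncodeindex op fn
% creates an operation index `op' and then redirects its entries into
% the function index, with each redirected entry set in @code (that is
% what \dosynindex with \docodeindex arranges).
%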
% \def\atdummies{% \def\@{@@}% \def\ {@ }% \let\{ = \lbraceatcmd \let\} = \rbraceatcmd % % Do the redefinitions. \commondummies \otherbackslash } % Called from \indexdummies and \atdummies. % \def\commondummies{% % % \definedummyword defines \#1 as \string\#1\space, thus effectively % preventing its expansion. This is used only for control% words, % not control letters, because the \space would be incorrect for % control characters, but is needed to separate the control word % from whatever follows. % % For control letters, we have \definedummyletter, which omits the % space. % % These can be used both for control words that take an argument and % those that do not. If it is followed by {arg} in the input, then % that will dutifully get written to the index (or wherever). % \def\definedummyword ##1{\def##1{\string##1\space}}% \def\definedummyletter##1{\def##1{\string##1}}% \let\definedummyaccent\definedummyletter % \commondummiesnofonts % \definedummyletter\_% % % Non-English letters. \definedummyword\AA \definedummyword\AE \definedummyword\L \definedummyword\OE \definedummyword\O \definedummyword\aa \definedummyword\ae \definedummyword\l \definedummyword\oe \definedummyword\o \definedummyword\ss \definedummyword\exclamdown \definedummyword\questiondown \definedummyword\ordf \definedummyword\ordm % % Although these internal commands shouldn't show up, sometimes they do. \definedummyword\bf \definedummyword\gtr \definedummyword\hat \definedummyword\less \definedummyword\sf \definedummyword\sl \definedummyword\tclose \definedummyword\tt % \definedummyword\LaTeX \definedummyword\TeX % % Assorted special characters. \definedummyword\bullet \definedummyword\comma \definedummyword\copyright \definedummyword\registeredsymbol \definedummyword\dots \definedummyword\enddots \definedummyword\equiv \definedummyword\error \definedummyword\euro \definedummyword\expansion \definedummyword\minus \definedummyword\pounds \definedummyword\point \definedummyword\print \definedummyword\result \definedummyword\textdegree % % We want to disable all macros so that they are not expanded by \write. \macrolist % \normalturnoffactive % % Handle some cases of @value -- where it does not contain any % (non-fully-expandable) commands. \makevalueexpandable } % \commondummiesnofonts: common to \commondummies and \indexnofonts. % \def\commondummiesnofonts{% % Control letters and accents. \definedummyletter\!% \definedummyaccent\"% \definedummyaccent\'% \definedummyletter\*% \definedummyaccent\,% \definedummyletter\.% \definedummyletter\/% \definedummyletter\:% \definedummyaccent\=% \definedummyletter\?% \definedummyaccent\^% \definedummyaccent\`% \definedummyaccent\~% \definedummyword\u \definedummyword\v \definedummyword\H \definedummyword\dotaccent \definedummyword\ringaccent \definedummyword\tieaccent \definedummyword\ubaraccent \definedummyword\udotaccent \definedummyword\dotless % % Texinfo font commands. \definedummyword\b \definedummyword\i \definedummyword\r \definedummyword\sc \definedummyword\t % % Commands that take arguments. 
\definedummyword\acronym \definedummyword\cite \definedummyword\code \definedummyword\command \definedummyword\dfn \definedummyword\emph \definedummyword\env \definedummyword\file \definedummyword\kbd \definedummyword\key \definedummyword\math \definedummyword\option \definedummyword\pxref \definedummyword\ref \definedummyword\samp \definedummyword\strong \definedummyword\tie \definedummyword\uref \definedummyword\url \definedummyword\var \definedummyword\verb \definedummyword\w \definedummyword\xref } % \indexnofonts is used when outputting the strings to sort the index % by, and when constructing control sequence names. It eliminates all % control sequences and just writes whatever the best ASCII sort string % would be for a given command (usually its argument). % \def\indexnofonts{% % Accent commands should become @asis. \def\definedummyaccent##1{\let##1\asis}% % We can just ignore other control letters. \def\definedummyletter##1{\let##1\empty}% % Hopefully, all control words can become @asis. \let\definedummyword\definedummyaccent % \commondummiesnofonts % % Don't no-op \tt, since it isn't a user-level command % and is used in the definitions of the active chars like <, >, |, etc. % Likewise with the other plain tex font commands. %\let\tt=\asis % \def\ { }% \def\@{@}% % how to handle braces? \def\_{\normalunderscore}% % % Non-English letters. \def\AA{AA}% \def\AE{AE}% \def\L{L}% \def\OE{OE}% \def\O{O}% \def\aa{aa}% \def\ae{ae}% \def\l{l}% \def\oe{oe}% \def\o{o}% \def\ss{ss}% \def\exclamdown{!}% \def\questiondown{?}% \def\ordf{a}% \def\ordm{o}% % \def\LaTeX{LaTeX}% \def\TeX{TeX}% % % Assorted special characters. % (The following {} will end up in the sort string, but that's ok.) \def\bullet{bullet}% \def\comma{,}% \def\copyright{copyright}% \def\registeredsymbol{R}% \def\dots{...}% \def\enddots{...}% \def\equiv{==}% \def\error{error}% \def\euro{euro}% \def\expansion{==>}% \def\minus{-}% \def\pounds{pounds}% \def\point{.}% \def\print{-|}% \def\result{=>}% \def\textdegree{degrees}% % % We need to get rid of all macros, leaving only the arguments (if present). % Of course this is not nearly correct, but it is the best we can do for now. % makeinfo does not expand macros in the argument to @deffn, which ends up % writing an index entry, and texindex isn't prepared for an index sort entry % that starts with \. % % Since macro invocations are followed by braces, we can just redefine them % to take a single TeX argument. The case of a macro invocation that % goes to end-of-line is not handled. % \macrolist } \let\indexbackslash=0 %overridden during \printindex. \let\SETmarginindex=\relax % put index entries in margin (undocumented)? % Most index entries go through here, but \dosubind is the general case. % #1 is the index name, #2 is the entry text. \def\doind#1#2{\dosubind{#1}{#2}{}} % Workhorse for all \fooindexes. % #1 is name of index, #2 is stuff to put there, #3 is subentry -- % empty if called from \doind, as we usually are (the main exception % is with most defuns, which call us directly). % \def\dosubind#1#2#3{% \iflinks {% % Store the main index entry text (including the third arg). \toks0 = {#2}% % If third arg is present, precede it with a space. \def\thirdarg{#3}% \ifx\thirdarg\empty \else \toks0 = \expandafter{\the\toks0 \space #3}% \fi % \edef\writeto{\csname#1indfile\endcsname}% % \ifvmode \dosubindsanitize \else \dosubindwrite \fi }% \fi } % Write the entry in \toks0 to the index file: % \def\dosubindwrite{% % Put the index entry in the margin if desired. 
\ifx\SETmarginindex\relax\else \insert\margin{\hbox{\vrule height8pt depth3pt width0pt \the\toks0}}% \fi % % Remember, we are within a group. \indexdummies % Must do this here, since \bf, etc expand at this stage \def\backslashcurfont{\indexbackslash}% \indexbackslash isn't defined now % so it will be output as is; and it will print as backslash. % % Process the index entry with all font commands turned off, to % get the string to sort by. {\indexnofonts \edef\temp{\the\toks0}% need full expansion \xdef\indexsorttmp{\temp}% }% % % Set up the complete index entry, with both the sort key and % the original text, including any font commands. We write % three arguments to \entry to the .?? file (four in the % subentry case), texindex reduces to two when writing the .??s % sorted result. \edef\temp{% \write\writeto{% \string\entry{\indexsorttmp}{\noexpand\folio}{\the\toks0}}% }% \temp } % Take care of unwanted page breaks: % % If a skip is the last thing on the list now, preserve it % by backing up by \lastskip, doing the \write, then inserting % the skip again. Otherwise, the whatsit generated by the % \write will make \lastskip zero. The result is that sequences % like this: % @end defun % @tindex whatever % @defun ... % will have extra space inserted, because the \medbreak in the % start of the @defun won't see the skip inserted by the @end of % the previous defun. % % But don't do any of this if we're not in vertical mode. We % don't want to do a \vskip and prematurely end a paragraph. % % Avoid page breaks due to these extra skips, too. % % But wait, there is a catch there: % We'll have to check whether \lastskip is zero skip. \ifdim is not % sufficient for this purpose, as it ignores stretch and shrink parts % of the skip. The only way seems to be to check the textual % representation of the skip. % % The following is almost like \def\zeroskipmacro{0.0pt} except that % the ``p'' and ``t'' characters have catcode \other, not 11 (letter). % \edef\zeroskipmacro{\expandafter\the\csname z@skip\endcsname} % % ..., ready, GO: % \def\dosubindsanitize{% % \lastskip and \lastpenalty cannot both be nonzero simultaneously. \skip0 = \lastskip \edef\lastskipmacro{\the\lastskip}% \count255 = \lastpenalty % % If \lastskip is nonzero, that means the last item was a % skip. And since a skip is discardable, that means this % -\skip0 glue we're inserting is preceded by a % non-discardable item, therefore it is not a potential % breakpoint, therefore no \nobreak needed. \ifx\lastskipmacro\zeroskipmacro \else \vskip-\skip0 \fi % \dosubindwrite % \ifx\lastskipmacro\zeroskipmacro % If \lastskip was zero, perhaps the last item was a penalty, and % perhaps it was >=10000, e.g., a \nobreak. In that case, we want % to re-insert the same penalty (values >10000 are used for various % signals); since we just inserted a non-discardable item, any % following glue (such as a \parskip) would be a breakpoint. For example: % % @deffn deffn-whatever % @vindex index-whatever % Description. % would allow a break between the index-whatever whatsit % and the "Description." paragraph. \ifnum\count255>9999 \penalty\count255 \fi \else % On the other hand, if we had a nonzero \lastskip, % this make-up glue would be preceded by a non-discardable item % (the whatsit from the \write), so we must insert a \nobreak. 
\nobreak\vskip\skip0 \fi } % The index entry written in the file actually looks like % \entry {sortstring}{page}{topic} % or % \entry {sortstring}{page}{topic}{subtopic} % The texindex program reads in these files and writes files % containing these kinds of lines: % \initial {c} % before the first topic whose initial is c % \entry {topic}{pagelist} % for a topic that is used without subtopics % \primary {topic} % for the beginning of a topic that is used with subtopics % \secondary {subtopic}{pagelist} % for each subtopic. % Define the user-accessible indexing commands % @findex, @vindex, @kindex, @cindex. \def\findex {\fnindex} \def\kindex {\kyindex} \def\cindex {\cpindex} \def\vindex {\vrindex} \def\tindex {\tpindex} \def\pindex {\pgindex} \def\cindexsub {\begingroup\obeylines\cindexsub} {\obeylines % \gdef\cindexsub "#1" #2^^M{\endgroup % \dosubind{cp}{#2}{#1}}} % Define the macros used in formatting output of the sorted index material. % @printindex causes a particular index (the ??s file) to get printed. % It does not print any chapter heading (usually an @unnumbered). % \parseargdef\printindex{\begingroup \dobreak \chapheadingskip{10000}% % \smallfonts \rm \tolerance = 9500 \everypar = {}% don't want the \kern\-parindent from indentation suppression. % % See if the index file exists and is nonempty. % Change catcode of @ here so that if the index file contains % \initial {@} % as its first line, TeX doesn't complain about mismatched braces % (because it thinks @} is a control sequence). \catcode`\@ = 11 \openin 1 \jobname.#1s \ifeof 1 % \enddoublecolumns gets confused if there is no text in the index, % and it loses the chapter title and the aux file entries for the % index. The easiest way to prevent this problem is to make sure % there is some text. \putwordIndexNonexistent \else % % If the index file exists but is empty, then \openin leaves \ifeof % false. We have to make TeX try to read something from the file, so % it can discover if there is anything in it. \read 1 to \temp \ifeof 1 \putwordIndexIsEmpty \else % Index files are almost Texinfo source, but we use \ as the escape % character. It would be better to use @, but that's too big a change % to make right now. \def\indexbackslash{\backslashcurfont}% \catcode`\\ = 0 \escapechar = `\\ \begindoublecolumns \input \jobname.#1s \enddoublecolumns \fi \fi \closein 1 \endgroup} % These macros are used by the sorted index file itself. % Change them to control the appearance of the index. \def\initial#1{{% % Some minor font changes for the special characters. \let\tentt=\sectt \let\tt=\sectt \let\sf=\sectt % % Remove any glue we may have, we'll be inserting our own. \removelastskip % % We like breaks before the index initials, so insert a bonus. \nobreak \vskip 0pt plus 3\baselineskip \penalty 0 \vskip 0pt plus -3\baselineskip % % Typeset the initial. Making this add up to a whole number of % baselineskips increases the chance of the dots lining up from column % to column. It still won't often be perfect, because of the stretch % we need before each entry, but it's better. % % No shrink because it confuses \balancecolumns. \vskip 1.67\baselineskip plus .5\baselineskip \leftline{\secbf #1}% % Do our best not to break after the initial. \nobreak \vskip .33\baselineskip plus .1\baselineskip }} % \entry typesets a paragraph consisting of the text (#1), dot leaders, and % then page number (#2) flushed to the right margin. It is used for index % and table of contents entries. The paragraph is indented by \leftskip. 
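% So a line in a sorted index file (e.g. \jobname.cps for the concept
% index) might read, with illustrative values:
%   \entry {parser}{12, 30}
% and the \entry macro below sets that as the text "parser", dot
% leaders, and the page list flushed to the right margin.
%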
% % A straightforward implementation would start like this: % \def\entry#1#2{... % But this frozes the catcodes in the argument, and can cause problems to % @code, which sets - active. This problem was fixed by a kludge--- % ``-'' was active throughout whole index, but this isn't really right. % % The right solution is to prevent \entry from swallowing the whole text. % --kasal, 21nov03 \def\entry{% \begingroup % % Start a new paragraph if necessary, so our assignments below can't % affect previous text. \par % % Do not fill out the last line with white space. \parfillskip = 0in % % No extra space above this paragraph. \parskip = 0in % % Do not prefer a separate line ending with a hyphen to fewer lines. \finalhyphendemerits = 0 % % \hangindent is only relevant when the entry text and page number % don't both fit on one line. In that case, bob suggests starting the % dots pretty far over on the line. Unfortunately, a large % indentation looks wrong when the entry text itself is broken across % lines. So we use a small indentation and put up with long leaders. % % \hangafter is reset to 1 (which is the value we want) at the start % of each paragraph, so we need not do anything with that. \hangindent = 2em % % When the entry text needs to be broken, just fill out the first line % with blank space. \rightskip = 0pt plus1fil % % A bit of stretch before each entry for the benefit of balancing % columns. \vskip 0pt plus1pt % % Swallow the left brace of the text (first parameter): \afterassignment\doentry \let\temp = } \def\doentry{% \bgroup % Instead of the swallowed brace. \noindent \aftergroup\finishentry % And now comes the text of the entry. } \def\finishentry#1{% % #1 is the page number. % % The following is kludged to not output a line of dots in the index if % there are no page numbers. The next person who breaks this will be % cursed by a Unix daemon. \def\tempa{{\rm }}% \def\tempb{#1}% \edef\tempc{\tempa}% \edef\tempd{\tempb}% \ifx\tempc\tempd \ % \else % % If we must, put the page number on a line of its own, and fill out % this line with blank space. (The \hfil is overwhelmed with the % fill leaders glue in \indexdotfill if the page number does fit.) \hfil\penalty50 \null\nobreak\indexdotfill % Have leaders before the page number. % % The `\ ' here is removed by the implicit \unskip that TeX does as % part of (the primitive) \par. Without it, a spurious underfull % \hbox ensues. \ifpdf \pdfgettoks#1.% \ \the\toksA \else \ #1% \fi \fi \par \endgroup } % Like plain.tex's \dotfill, except uses up at least 1 em. \def\indexdotfill{\cleaders \hbox{$\mathsurround=0pt \mkern1.5mu.\mkern1.5mu$}\hskip 1em plus 1fill} \def\primary #1{\line{#1\hfil}} \newskip\secondaryindent \secondaryindent=0.5cm \def\secondary#1#2{{% \parfillskip=0in \parskip=0in \hangindent=1in \hangafter=1 \noindent\hskip\secondaryindent\hbox{#1}\indexdotfill \ifpdf \pdfgettoks#2.\ \the\toksA % The page number ends the paragraph. \else #2 \fi \par }} % Define two-column mode, which we use to typeset indexes. % Adapted from the TeXbook, page 416, which is to say, % the manmac.tex format used to print the TeXbook itself. \catcode`\@=11 \newbox\partialpage \newdimen\doublecolumnhsize \def\begindoublecolumns{\begingroup % ended by \enddoublecolumns % Grab any single-column material above us. 
\output = {% % % Here is a possibility not foreseen in manmac: if we accumulate a % whole lot of material, we might end up calling this \output % routine twice in a row (see the doublecol-lose test, which is % essentially a couple of indexes with @setchapternewpage off). In % that case we just ship out what is in \partialpage with the normal % output routine. Generally, \partialpage will be empty when this % runs and this will be a no-op. See the indexspread.tex test case. \ifvoid\partialpage \else \onepageout{\pagecontents\partialpage}% \fi % \global\setbox\partialpage = \vbox{% % Unvbox the main output page. \unvbox\PAGE \kern-\topskip \kern\baselineskip }% }% \eject % run that output routine to set \partialpage % % Use the double-column output routine for subsequent pages. \output = {\doublecolumnout}% % % Change the page size parameters. We could do this once outside this % routine, in each of @smallbook, @afourpaper, and the default 8.5x11 % format, but then we repeat the same computation. Repeating a couple % of assignments once per index is clearly meaningless for the % execution time, so we may as well do it in one place. % % First we halve the line length, less a little for the gutter between % the columns. We compute the gutter based on the line length, so it % changes automatically with the paper format. The magic constant % below is chosen so that the gutter has the same value (well, +-<1pt) % as it did when we hard-coded it. % % We put the result in a separate register, \doublecolumhsize, so we % can restore it in \pagesofar, after \hsize itself has (potentially) % been clobbered. % \doublecolumnhsize = \hsize \advance\doublecolumnhsize by -.04154\hsize \divide\doublecolumnhsize by 2 \hsize = \doublecolumnhsize % % Double the \vsize as well. (We don't need a separate register here, % since nobody clobbers \vsize.) \vsize = 2\vsize } % The double-column output routine for all double-column pages except % the last. % \def\doublecolumnout{% \splittopskip=\topskip \splitmaxdepth=\maxdepth % Get the available space for the double columns -- the normal % (undoubled) page height minus any material left over from the % previous page. \dimen@ = \vsize \divide\dimen@ by 2 \advance\dimen@ by -\ht\partialpage % % box0 will be the left-hand column, box2 the right. \setbox0=\vsplit255 to\dimen@ \setbox2=\vsplit255 to\dimen@ \onepageout\pagesofar \unvbox255 \penalty\outputpenalty } % % Re-output the contents of the output page -- any previous material, % followed by the two boxes we just split, in box0 and box2. \def\pagesofar{% \unvbox\partialpage % \hsize = \doublecolumnhsize \wd0=\hsize \wd2=\hsize \hbox to\pagewidth{\box0\hfil\box2}% } % % All done with double columns. \def\enddoublecolumns{% \output = {% % Split the last of the double-column material. Leave it on the % current page, no automatic page break. \balancecolumns % % If we end up splitting too much material for the current page, % though, there will be another page break right after this \output % invocation ends. Having called \balancecolumns once, we do not % want to call it again. Therefore, reset \output to its normal % definition right away. (We hope \balancecolumns will never be % called on to balance too much material, but if it is, this makes % the output somewhat more palatable.) \global\output = {\onepageout{\pagecontents\PAGE}}% }% \eject \endgroup % started in \begindoublecolumns % % \pagegoal was set to the doubled \vsize above, since we restarted % the current page. 
We're now back to normal single-column % typesetting, so reset \pagegoal to the normal \vsize (after the % \endgroup where \vsize got restored). \pagegoal = \vsize } % % Called at the end of the double column material. \def\balancecolumns{% \setbox0 = \vbox{\unvbox255}% like \box255 but more efficient, see p.120. \dimen@ = \ht0 \advance\dimen@ by \topskip \advance\dimen@ by-\baselineskip \divide\dimen@ by 2 % target to split to %debug\message{final 2-column material height=\the\ht0, target=\the\dimen@.}% \splittopskip = \topskip % Loop until we get a decent breakpoint. {% \vbadness = 10000 \loop \global\setbox3 = \copy0 \global\setbox1 = \vsplit3 to \dimen@ \ifdim\ht3>\dimen@ \global\advance\dimen@ by 1pt \repeat }% %debug\message{split to \the\dimen@, column heights: \the\ht1, \the\ht3.}% \setbox0=\vbox to\dimen@{\unvbox1}% \setbox2=\vbox to\dimen@{\unvbox3}% % \pagesofar } \catcode`\@ = \other \message{sectioning,} % Chapters, sections, etc. % \unnumberedno is an oxymoron, of course. But we count the unnumbered % sections so that we can refer to them unambiguously in the pdf % outlines by their "section number". We avoid collisions with chapter % numbers by starting them at 10000. (If a document ever has 10000 % chapters, we're in trouble anyway, I'm sure.) \newcount\unnumberedno \unnumberedno = 10000 \newcount\chapno \newcount\secno \secno=0 \newcount\subsecno \subsecno=0 \newcount\subsubsecno \subsubsecno=0 % This counter is funny since it counts through charcodes of letters A, B, ... \newcount\appendixno \appendixno = `\@ % % \def\appendixletter{\char\the\appendixno} % We do the following ugly conditional instead of the above simple % construct for the sake of pdftex, which needs the actual % letter in the expansion, not just typeset. % \def\appendixletter{% \ifnum\appendixno=`A A% \else\ifnum\appendixno=`B B% \else\ifnum\appendixno=`C C% \else\ifnum\appendixno=`D D% \else\ifnum\appendixno=`E E% \else\ifnum\appendixno=`F F% \else\ifnum\appendixno=`G G% \else\ifnum\appendixno=`H H% \else\ifnum\appendixno=`I I% \else\ifnum\appendixno=`J J% \else\ifnum\appendixno=`K K% \else\ifnum\appendixno=`L L% \else\ifnum\appendixno=`M M% \else\ifnum\appendixno=`N N% \else\ifnum\appendixno=`O O% \else\ifnum\appendixno=`P P% \else\ifnum\appendixno=`Q Q% \else\ifnum\appendixno=`R R% \else\ifnum\appendixno=`S S% \else\ifnum\appendixno=`T T% \else\ifnum\appendixno=`U U% \else\ifnum\appendixno=`V V% \else\ifnum\appendixno=`W W% \else\ifnum\appendixno=`X X% \else\ifnum\appendixno=`Y Y% \else\ifnum\appendixno=`Z Z% % The \the is necessary, despite appearances, because \appendixletter is % expanded while writing the .toc file. \char\appendixno is not % expandable, thus it is written literally, thus all appendixes come out % with the same letter (or @) in the toc without it. \else\char\the\appendixno \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi} % Each @chapter defines this as the name of the chapter. % page headings and footings can use it. @section does likewise. % However, they are not reliable, because we don't use marks. \def\thischapter{} \def\thissection{} \newcount\absseclevel % used to calculate proper heading level \newcount\secbase\secbase=0 % @raisesections/@lowersections modify this count % @raisesections: treat @section as chapter, @subsection as section, etc. \def\raisesections{\global\advance\secbase by -1} \let\up=\raisesections % original BFox name % @lowersections: treat @chapter as section, @section as subsection, etc. 
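% For instance, to pull a stand-alone chapter file in as a section
% (sketch only; the file name is illustrative):
%   @lowersections
%   @include as-a-chapter.texi
%   @raisesections
% Inside the included file each @chapter then acts as @section, each
% @section as @subsection, and so on; @raisesections undoes the shift.
%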
\def\lowersections{\global\advance\secbase by 1} \let\down=\lowersections % original BFox name % we only have subsub. \chardef\maxseclevel = 3 % % A numbered section within an unnumbered changes to unnumbered too. % To achive this, remember the "biggest" unnum. sec. we are currently in: \chardef\unmlevel = \maxseclevel % % Trace whether the current chapter is an appendix or not: % \chapheadtype is "N" or "A", unnumbered chapters are ignored. \def\chapheadtype{N} % Choose a heading macro % #1 is heading type % #2 is heading level % #3 is text for heading \def\genhead#1#2#3{% % Compute the abs. sec. level: \absseclevel=#2 \advance\absseclevel by \secbase % Make sure \absseclevel doesn't fall outside the range: \ifnum \absseclevel < 0 \absseclevel = 0 \else \ifnum \absseclevel > 3 \absseclevel = 3 \fi \fi % The heading type: \def\headtype{#1}% \if \headtype U% \ifnum \absseclevel < \unmlevel \chardef\unmlevel = \absseclevel \fi \else % Check for appendix sections: \ifnum \absseclevel = 0 \edef\chapheadtype{\headtype}% \else \if \headtype A\if \chapheadtype N% \errmessage{@appendix... within a non-appendix chapter}% \fi\fi \fi % Check for numbered within unnumbered: \ifnum \absseclevel > \unmlevel \def\headtype{U}% \else \chardef\unmlevel = 3 \fi \fi % Now print the heading: \if \headtype U% \ifcase\absseclevel \unnumberedzzz{#3}% \or \unnumberedseczzz{#3}% \or \unnumberedsubseczzz{#3}% \or \unnumberedsubsubseczzz{#3}% \fi \else \if \headtype A% \ifcase\absseclevel \appendixzzz{#3}% \or \appendixsectionzzz{#3}% \or \appendixsubseczzz{#3}% \or \appendixsubsubseczzz{#3}% \fi \else \ifcase\absseclevel \chapterzzz{#3}% \or \seczzz{#3}% \or \numberedsubseczzz{#3}% \or \numberedsubsubseczzz{#3}% \fi \fi \fi \suppressfirstparagraphindent } % an interface: \def\numhead{\genhead N} \def\apphead{\genhead A} \def\unnmhead{\genhead U} % @chapter, @appendix, @unnumbered. Increment top-level counter, reset % all lower-level sectioning counters to zero. % % Also set \chaplevelprefix, which we prepend to @float sequence numbers % (e.g., figures), q.v. By default (before any chapter), that is empty. \let\chaplevelprefix = \empty % \outer\parseargdef\chapter{\numhead0{#1}} % normally numhead0 calls chapterzzz \def\chapterzzz#1{% % section resetting is \global in case the chapter is in a group, such % as an @include file. \global\secno=0 \global\subsecno=0 \global\subsubsecno=0 \global\advance\chapno by 1 % % Used for \float. \gdef\chaplevelprefix{\the\chapno.}% \resetallfloatnos % \message{\putwordChapter\space \the\chapno}% % % Write the actual heading. \chapmacro{#1}{Ynumbered}{\the\chapno}% % % So @section and the like are numbered underneath this chapter. 
\global\let\section = \numberedsec \global\let\subsection = \numberedsubsec \global\let\subsubsection = \numberedsubsubsec } \outer\parseargdef\appendix{\apphead0{#1}} % normally apphead0 calls appendixzzz \def\appendixzzz#1{% \global\secno=0 \global\subsecno=0 \global\subsubsecno=0 \global\advance\appendixno by 1 \gdef\chaplevelprefix{\appendixletter.}% \resetallfloatnos % \def\appendixnum{\putwordAppendix\space \appendixletter}% \message{\appendixnum}% % \chapmacro{#1}{Yappendix}{\appendixletter}% % \global\let\section = \appendixsec \global\let\subsection = \appendixsubsec \global\let\subsubsection = \appendixsubsubsec } \outer\parseargdef\unnumbered{\unnmhead0{#1}} % normally unnmhead0 calls unnumberedzzz \def\unnumberedzzz#1{% \global\secno=0 \global\subsecno=0 \global\subsubsecno=0 \global\advance\unnumberedno by 1 % % Since an unnumbered has no number, no prefix for figures. \global\let\chaplevelprefix = \empty \resetallfloatnos % % This used to be simply \message{#1}, but TeX fully expands the % argument to \message. Therefore, if #1 contained @-commands, TeX % expanded them. For example, in `@unnumbered The @cite{Book}', TeX % expanded @cite (which turns out to cause errors because \cite is meant % to be executed, not expanded). % % Anyway, we don't want the fully-expanded definition of @cite to appear % as a result of the \message, we just want `@cite' itself. We use % \the to achieve this: TeX expands \the only once, % simply yielding the contents of . (We also do this for % the toc entries.) \toks0 = {#1}% \message{(\the\toks0)}% % \chapmacro{#1}{Ynothing}{\the\unnumberedno}% % \global\let\section = \unnumberedsec \global\let\subsection = \unnumberedsubsec \global\let\subsubsection = \unnumberedsubsubsec } % @centerchap is like @unnumbered, but the heading is centered. \outer\parseargdef\centerchap{% % Well, we could do the following in a group, but that would break % an assumption that \chapmacro is called at the outermost level. % Thus we are safer this way: --kasal, 24feb04 \let\centerparametersmaybe = \centerparameters \unnmhead0{#1}% \let\centerparametersmaybe = \relax } % @top is like @unnumbered. \let\top\unnumbered % Sections. \outer\parseargdef\numberedsec{\numhead1{#1}} % normally calls seczzz \def\seczzz#1{% \global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1 \sectionheading{#1}{sec}{Ynumbered}{\the\chapno.\the\secno}% } \outer\parseargdef\appendixsection{\apphead1{#1}} % normally calls appendixsectionzzz \def\appendixsectionzzz#1{% \global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1 \sectionheading{#1}{sec}{Yappendix}{\appendixletter.\the\secno}% } \let\appendixsec\appendixsection \outer\parseargdef\unnumberedsec{\unnmhead1{#1}} % normally calls unnumberedseczzz \def\unnumberedseczzz#1{% \global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1 \sectionheading{#1}{sec}{Ynothing}{\the\unnumberedno.\the\secno}% } % Subsections. 
\outer\parseargdef\numberedsubsec{\numhead2{#1}} % normally calls numberedsubseczzz \def\numberedsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Ynumbered}{\the\chapno.\the\secno.\the\subsecno}% } \outer\parseargdef\appendixsubsec{\apphead2{#1}} % normally calls appendixsubseczzz \def\appendixsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Yappendix}% {\appendixletter.\the\secno.\the\subsecno}% } \outer\parseargdef\unnumberedsubsec{\unnmhead2{#1}} %normally calls unnumberedsubseczzz \def\unnumberedsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Ynothing}% {\the\unnumberedno.\the\secno.\the\subsecno}% } % Subsubsections. \outer\parseargdef\numberedsubsubsec{\numhead3{#1}} % normally numberedsubsubseczzz \def\numberedsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Ynumbered}% {\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno}% } \outer\parseargdef\appendixsubsubsec{\apphead3{#1}} % normally appendixsubsubseczzz \def\appendixsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Yappendix}% {\appendixletter.\the\secno.\the\subsecno.\the\subsubsecno}% } \outer\parseargdef\unnumberedsubsubsec{\unnmhead3{#1}} %normally unnumberedsubsubseczzz \def\unnumberedsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Ynothing}% {\the\unnumberedno.\the\secno.\the\subsecno.\the\subsubsecno}% } % These macros control what the section commands do, according % to what kind of chapter we are in (ordinary, appendix, or unnumbered). % Define them by default for a numbered chapter. \let\section = \numberedsec \let\subsection = \numberedsubsec \let\subsubsection = \numberedsubsubsec % Define @majorheading, @heading and @subheading % NOTE on use of \vbox for chapter headings, section headings, and such: % 1) We use \vbox rather than the earlier \line to permit % overlong headings to fold. % 2) \hyphenpenalty is set to 10000 because hyphenation in a % heading is obnoxious; this forbids it. % 3) Likewise, headings look best if no \parindent is used, and % if justification is not attempted. Hence \raggedright. \def\majorheading{% {\advance\chapheadingskip by 10pt \chapbreak }% \parsearg\chapheadingzzz } \def\chapheading{\chapbreak \parsearg\chapheadingzzz} \def\chapheadingzzz#1{% {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000 \parindent=0pt\raggedright \rm #1\hfill}}% \bigskip \par\penalty 200\relax \suppressfirstparagraphindent } % @heading, @subheading, @subsubheading. \parseargdef\heading{\sectionheading{#1}{sec}{Yomitfromtoc}{} \suppressfirstparagraphindent} \parseargdef\subheading{\sectionheading{#1}{subsec}{Yomitfromtoc}{} \suppressfirstparagraphindent} \parseargdef\subsubheading{\sectionheading{#1}{subsubsec}{Yomitfromtoc}{} \suppressfirstparagraphindent} % These macros generate a chapter, section, etc. heading only % (including whitespace, linebreaking, etc. around it), % given all the information in convenient, parsed form. 
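% As an example of the heading-only commands just defined (sketch; the
% title text is illustrative):
%   @heading Design notes
% prints a section-style title with no number and no table-of-contents
% entry (it goes through \sectionheading with the Yomitfromtoc type).
%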
%%% Args are the skip and penalty (usually negative) \def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi} %%% Define plain chapter starts, and page on/off switching for it % Parameter controlling skip before chapter headings (if needed) \newskip\chapheadingskip \def\chapbreak{\dobreak \chapheadingskip {-4000}} \def\chappager{\par\vfill\supereject} \def\chapoddpage{\chappager \ifodd\pageno \else \hbox to 0pt{} \chappager\fi} \def\setchapternewpage #1 {\csname CHAPPAG#1\endcsname} \def\CHAPPAGoff{% \global\let\contentsalignmacro = \chappager \global\let\pchapsepmacro=\chapbreak \global\let\pagealignmacro=\chappager} \def\CHAPPAGon{% \global\let\contentsalignmacro = \chappager \global\let\pchapsepmacro=\chappager \global\let\pagealignmacro=\chappager \global\def\HEADINGSon{\HEADINGSsingle}} \def\CHAPPAGodd{% \global\let\contentsalignmacro = \chapoddpage \global\let\pchapsepmacro=\chapoddpage \global\let\pagealignmacro=\chapoddpage \global\def\HEADINGSon{\HEADINGSdouble}} \CHAPPAGon % Chapter opening. % % #1 is the text, #2 is the section type (Ynumbered, Ynothing, % Yappendix, Yomitfromtoc), #3 the chapter number. % % To test against our argument. \def\Ynothingkeyword{Ynothing} \def\Yomitfromtockeyword{Yomitfromtoc} \def\Yappendixkeyword{Yappendix} % \def\chapmacro#1#2#3{% \pchapsepmacro {% \chapfonts \rm % % Have to define \thissection before calling \donoderef, because the % xref code eventually uses it. On the other hand, it has to be called % after \pchapsepmacro, or the headline will change too soon. \gdef\thissection{#1}% \gdef\thischaptername{#1}% % % Only insert the separating space if we have a chapter/appendix % number, and don't print the unnumbered ``number''. \def\temptype{#2}% \ifx\temptype\Ynothingkeyword \setbox0 = \hbox{}% \def\toctype{unnchap}% \gdef\thischapternum{}% \gdef\thischapter{#1}% \else\ifx\temptype\Yomitfromtockeyword \setbox0 = \hbox{}% contents like unnumbered, but no toc entry \def\toctype{omit}% \gdef\thischapternum{}% \gdef\thischapter{}% \else\ifx\temptype\Yappendixkeyword \setbox0 = \hbox{\putwordAppendix{} #3\enspace}% \def\toctype{app}% \xdef\thischapternum{\appendixletter}% % We don't substitute the actual chapter name into \thischapter % because we don't want its macros evaluated now. And we don't % use \thissection because that changes with each section. % \xdef\thischapter{\putwordAppendix{} \appendixletter: \noexpand\thischaptername}% \else \setbox0 = \hbox{#3\enspace}% \def\toctype{numchap}% \xdef\thischapternum{\the\chapno}% \xdef\thischapter{\putwordChapter{} \the\chapno: \noexpand\thischaptername}% \fi\fi\fi % % Write the toc entry for this chapter. Must come before the % \donoderef, because we include the current node name in the toc % entry, and \donoderef resets it to empty. \writetocentry{\toctype}{#1}{#3}% % % For pdftex, we have to write out the node definition (aka, make % the pdfdest) after any page break, but before the actual text has % been typeset. If the destination for the pdf outline is after the % text, then jumping from the outline may wind up with the text not % being visible, for instance under high magnification. \donoderef{#2}% % % Typeset the actual heading. \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright \hangindent=\wd0 \centerparametersmaybe \unhbox0 #1\par}% }% \nobreak\bigskip % no page break after a chapter title \nobreak } % @centerchap -- centered and unnumbered. 
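% For instance, @chapter ends up calling the \chapmacro defined above
% as (values illustrative):
%   \chapmacro{Deriving rules}{Ynumbered}{3}
% while @unnumbered passes Ynothing, and @appendix passes Yappendix
% with the appendix letter as the third argument.
%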
\let\centerparametersmaybe = \relax \def\centerparameters{% \advance\rightskip by 3\rightskip \leftskip = \rightskip \parfillskip = 0pt } % I don't think this chapter style is supported any more, so I'm not % updating it with the new noderef stuff. We'll see. --karl, 11aug03. % \def\setchapterstyle #1 {\csname CHAPF#1\endcsname} % \def\unnchfopen #1{% \chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000 \parindent=0pt\raggedright \rm #1\hfill}}\bigskip \par\nobreak } \def\chfopen #1#2{\chapoddpage {\chapfonts \vbox to 3in{\vfil \hbox to\hsize{\hfil #2} \hbox to\hsize{\hfil #1} \vfil}}% \par\penalty 5000 % } \def\centerchfopen #1{% \chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000 \parindent=0pt \hfill {\rm #1}\hfill}}\bigskip \par\nobreak } \def\CHAPFopen{% \global\let\chapmacro=\chfopen \global\let\centerchapmacro=\centerchfopen} % Section titles. These macros combine the section number parts and % call the generic \sectionheading to do the printing. % \newskip\secheadingskip \def\secheadingbreak{\dobreak \secheadingskip{-1000}} % Subsection titles. \newskip\subsecheadingskip \def\subsecheadingbreak{\dobreak \subsecheadingskip{-500}} % Subsubsection titles. \def\subsubsecheadingskip{\subsecheadingskip} \def\subsubsecheadingbreak{\subsecheadingbreak} % Print any size, any type, section title. % % #1 is the text, #2 is the section level (sec/subsec/subsubsec), #3 is % the section type for xrefs (Ynumbered, Ynothing, Yappendix), #4 is the % section number. % \def\sectionheading#1#2#3#4{% {% % Switch to the right set of fonts. \csname #2fonts\endcsname \rm % % Insert space above the heading. \csname #2headingbreak\endcsname % % Only insert the space after the number if we have a section number. \def\sectionlevel{#2}% \def\temptype{#3}% % \ifx\temptype\Ynothingkeyword \setbox0 = \hbox{}% \def\toctype{unn}% \gdef\thissection{#1}% \else\ifx\temptype\Yomitfromtockeyword % for @headings -- no section number, don't include in toc, % and don't redefine \thissection. \setbox0 = \hbox{}% \def\toctype{omit}% \let\sectionlevel=\empty \else\ifx\temptype\Yappendixkeyword \setbox0 = \hbox{#4\enspace}% \def\toctype{app}% \gdef\thissection{#1}% \else \setbox0 = \hbox{#4\enspace}% \def\toctype{num}% \gdef\thissection{#1}% \fi\fi\fi % % Write the toc entry (before \donoderef). See comments in \chapmacro. \writetocentry{\toctype\sectionlevel}{#1}{#4}% % % Write the node reference (= pdf destination for pdftex). % Again, see comments in \chapmacro. \donoderef{#3}% % % Interline glue will be inserted when the vbox is completed. % That glue will be a valid breakpoint for the page, since it'll be % preceded by a whatsit (usually from the \donoderef, or from the % \writetocentry if there was no node). We don't want to allow that % break, since then the whatsits could end up on page n while the % section is on page n+1, thus toc/etc. are wrong. Debian bug 276000. \nobreak % % Output the actual section heading. \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright \hangindent=\wd0 % zero if no section number \unhbox0 #1}% }% % Add extra space after the heading -- half of whatever came above it. % Don't allow stretch, though. \kern .5 \csname #2headingskip\endcsname % % Do not let the kern be a potential breakpoint, as it would be if it % was followed by glue. \nobreak % % We'll almost certainly start a paragraph next, so don't let that % glue accumulate. (Not a breakpoint because it's preceded by a % discardable item.) 
\vskip-\parskip % % This is purely so the last item on the list is a known \penalty > % 10000. This is so \startdefun can avoid allowing breakpoints after % section headings. Otherwise, it would insert a valid breakpoint between: % % @section sec-whatever % @deffn def-whatever \penalty 10001 } \message{toc,} % Table of contents. \newwrite\tocfile % Write an entry to the toc file, opening it if necessary. % Called from @chapter, etc. % % Example usage: \writetocentry{sec}{Section Name}{\the\chapno.\the\secno} % We append the current node name (if any) and page number as additional % arguments for the \{chap,sec,...}entry macros which will eventually % read this. The node name is used in the pdf outlines as the % destination to jump to. % % We open the .toc file for writing here instead of at @setfilename (or % any other fixed time) so that @contents can be anywhere in the document. % But if #1 is `omit', then we don't do anything. This is used for the % table of contents chapter openings themselves. % \newif\iftocfileopened \def\omitkeyword{omit}% % \def\writetocentry#1#2#3{% \edef\writetoctype{#1}% \ifx\writetoctype\omitkeyword \else \iftocfileopened\else \immediate\openout\tocfile = \jobname.toc \global\tocfileopenedtrue \fi % \iflinks {\atdummies \edef\temp{% \write\tocfile{@#1entry{#2}{#3}{\lastnode}{\noexpand\folio}}}% \temp }% \fi \fi % % Tell \shipout to create a pdf destination on each page, if we're % writing pdf. These are used in the table of contents. We can't % just write one on every page because the title pages are numbered % 1 and 2 (the page numbers aren't printed), and so are the first % two pages of the document. Thus, we'd have two destinations named % `1', and two named `2'. \ifpdf \global\pdfmakepagedesttrue \fi } % These characters do not print properly in the Computer Modern roman % fonts, so we must take special care. This is more or less redundant % with the Texinfo input format setup at the end of this file. % \def\activecatcodes{% \catcode`\"=\active \catcode`\$=\active \catcode`\<=\active \catcode`\>=\active \catcode`\\=\active \catcode`\^=\active \catcode`\_=\active \catcode`\|=\active \catcode`\~=\active } % Read the toc file, which is essentially Texinfo input. \def\readtocfile{% \setupdatafile \activecatcodes \input \jobname.toc } \newskip\contentsrightmargin \contentsrightmargin=1in \newcount\savepageno \newcount\lastnegativepageno \lastnegativepageno = -1 % Prepare to read what we've written to \tocfile. % \def\startcontents#1{% % If @setchapternewpage on, and @headings double, the contents should % start on an odd page, unlike chapters. Thus, we maintain % \contentsalignmacro in parallel with \pagealignmacro. % From: Torbjorn Granlund \contentsalignmacro \immediate\closeout\tocfile % % Don't need to put `Contents' or `Short Contents' in the headline. % It is abundantly clear what they are. \def\thischapter{}% \chapmacro{#1}{Yomitfromtoc}{}% % \savepageno = \pageno \begingroup % Set up to handle contents files properly. \raggedbottom % Worry more about breakpoints than the bottom. \advance\hsize by -\contentsrightmargin % Don't use the full line length. % % Roman numerals for page numbers. \ifnum \pageno>0 \global\pageno = \lastnegativepageno \fi } % Normal (long) toc. 
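% (Editor's illustrative sketch; not part of the original macro file.)
% A line written to \jobname.toc by \writetocentry for a numbered section
% ends up looking roughly like
%   @numsecentry{Getting Started}{1.2}{Getting Started}{7}
% i.e. title, section number, current node name, and page number.
% \readtocfile later reads such lines back as Texinfo input, and the
% \numchapentry/\numsecentry-style entry macros typeset them.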
\def\contents{% \startcontents{\putwordTOC}% \openin 1 \jobname.toc \ifeof 1 \else \readtocfile \fi \vfill \eject \contentsalignmacro % in case @setchapternewpage odd is in effect \ifeof 1 \else \pdfmakeoutlines \fi \closein 1 \endgroup \lastnegativepageno = \pageno \global\pageno = \savepageno } % And just the chapters. \def\summarycontents{% \startcontents{\putwordShortTOC}% % \let\numchapentry = \shortchapentry \let\appentry = \shortchapentry \let\unnchapentry = \shortunnchapentry % We want a true roman here for the page numbers. \secfonts \let\rm=\shortcontrm \let\bf=\shortcontbf \let\sl=\shortcontsl \let\tt=\shortconttt \rm \hyphenpenalty = 10000 \advance\baselineskip by 1pt % Open it up a little. \def\numsecentry##1##2##3##4{} \let\appsecentry = \numsecentry \let\unnsecentry = \numsecentry \let\numsubsecentry = \numsecentry \let\appsubsecentry = \numsecentry \let\unnsubsecentry = \numsecentry \let\numsubsubsecentry = \numsecentry \let\appsubsubsecentry = \numsecentry \let\unnsubsubsecentry = \numsecentry \openin 1 \jobname.toc \ifeof 1 \else \readtocfile \fi \closein 1 \vfill \eject \contentsalignmacro % in case @setchapternewpage odd is in effect \endgroup \lastnegativepageno = \pageno \global\pageno = \savepageno } \let\shortcontents = \summarycontents % Typeset the label for a chapter or appendix for the short contents. % The arg is, e.g., `A' for an appendix, or `3' for a chapter. % \def\shortchaplabel#1{% % This space should be enough, since a single number is .5em, and the % widest letter (M) is 1em, at least in the Computer Modern fonts. % But use \hss just in case. % (This space doesn't include the extra space that gets added after % the label; that gets put in by \shortchapentry above.) % % We'd like to right-justify chapter numbers, but that looks strange % with appendix letters. And right-justifying numbers and % left-justifying letters looks strange when there is less than 10 % chapters. Have to read the whole toc once to know how many chapters % there are before deciding ... \hbox to 1em{#1\hss}% } % These macros generate individual entries in the table of contents. % The first argument is the chapter or section name. % The last argument is the page number. % The arguments in between are the chapter number, section number, ... % Chapters, in the main contents. \def\numchapentry#1#2#3#4{\dochapentry{#2\labelspace#1}{#4}} % % Chapters, in the short toc. % See comments in \dochapentry re vbox and related settings. \def\shortchapentry#1#2#3#4{% \tocentry{\shortchaplabel{#2}\labelspace #1}{\doshortpageno\bgroup#4\egroup}% } % Appendices, in the main contents. % Need the word Appendix, and a fixed-size box. % \def\appendixbox#1{% % We use M since it's probably the widest letter. \setbox0 = \hbox{\putwordAppendix{} M}% \hbox to \wd0{\putwordAppendix{} #1\hss}} % \def\appentry#1#2#3#4{\dochapentry{\appendixbox{#2}\labelspace#1}{#4}} % Unnumbered chapters. \def\unnchapentry#1#2#3#4{\dochapentry{#1}{#4}} \def\shortunnchapentry#1#2#3#4{\tocentry{#1}{\doshortpageno\bgroup#4\egroup}} % Sections. \def\numsecentry#1#2#3#4{\dosecentry{#2\labelspace#1}{#4}} \let\appsecentry=\numsecentry \def\unnsecentry#1#2#3#4{\dosecentry{#1}{#4}} % Subsections. \def\numsubsecentry#1#2#3#4{\dosubsecentry{#2\labelspace#1}{#4}} \let\appsubsecentry=\numsubsecentry \def\unnsubsecentry#1#2#3#4{\dosubsecentry{#1}{#4}} % And subsubsections. 
\def\numsubsubsecentry#1#2#3#4{\dosubsubsecentry{#2\labelspace#1}{#4}} \let\appsubsubsecentry=\numsubsubsecentry \def\unnsubsubsecentry#1#2#3#4{\dosubsubsecentry{#1}{#4}} % This parameter controls the indentation of the various levels. % Same as \defaultparindent. \newdimen\tocindent \tocindent = 15pt % Now for the actual typesetting. In all these, #1 is the text and #2 is the % page number. % % If the toc has to be broken over pages, we want it to be at chapters % if at all possible; hence the \penalty. \def\dochapentry#1#2{% \penalty-300 \vskip1\baselineskip plus.33\baselineskip minus.25\baselineskip \begingroup \chapentryfonts \tocentry{#1}{\dopageno\bgroup#2\egroup}% \endgroup \nobreak\vskip .25\baselineskip plus.1\baselineskip } \def\dosecentry#1#2{\begingroup \secentryfonts \leftskip=\tocindent \tocentry{#1}{\dopageno\bgroup#2\egroup}% \endgroup} \def\dosubsecentry#1#2{\begingroup \subsecentryfonts \leftskip=2\tocindent \tocentry{#1}{\dopageno\bgroup#2\egroup}% \endgroup} \def\dosubsubsecentry#1#2{\begingroup \subsubsecentryfonts \leftskip=3\tocindent \tocentry{#1}{\dopageno\bgroup#2\egroup}% \endgroup} % We use the same \entry macro as for the index entries. \let\tocentry = \entry % Space between chapter (or whatever) number and the title. \def\labelspace{\hskip1em \relax} \def\dopageno#1{{\rm #1}} \def\doshortpageno#1{{\rm #1}} \def\chapentryfonts{\secfonts \rm} \def\secentryfonts{\textfonts} \def\subsecentryfonts{\textfonts} \def\subsubsecentryfonts{\textfonts} \message{environments,} % @foo ... @end foo. % @point{}, @result{}, @expansion{}, @print{}, @equiv{}. % % Since these characters are used in examples, it should be an even number of % \tt widths. Each \tt character is 1en, so two makes it 1em. % \def\point{$\star$} \def\result{\leavevmode\raise.15ex\hbox to 1em{\hfil$\Rightarrow$\hfil}} \def\expansion{\leavevmode\raise.1ex\hbox to 1em{\hfil$\mapsto$\hfil}} \def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}} \def\equiv{\leavevmode\lower.1ex\hbox to 1em{\hfil$\ptexequiv$\hfil}} % The @error{} command. % Adapted from the TeXbook's \boxit. % \newbox\errorbox % {\tentt \global\dimen0 = 3em}% Width of the box. \dimen2 = .55pt % Thickness of rules % The text. (`r' is open on the right, `e' somewhat less so on the left.) \setbox0 = \hbox{\kern-.75pt \reducedsf error\kern-1.5pt} % \setbox\errorbox=\hbox to \dimen0{\hfil \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right. \advance\hsize by -2\dimen2 % Rules. \vbox{% \hrule height\dimen2 \hbox{\vrule width\dimen2 \kern3pt % Space to left of text. \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below. \kern3pt\vrule width\dimen2}% Space to right. \hrule height\dimen2} \hfil} % \def\error{\leavevmode\lower.7ex\copy\errorbox} % @tex ... @end tex escapes into raw Tex temporarily. % One exception: @ is still an escape character, so that @end tex works. % But \@ or @@ will get a plain tex @ character. 
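% (Editor's illustrative sketch; not part of the original macro file.)
% In the Texinfo source, raw TeX is entered like this:
%   @tex
%   $$ e^{i\pi} + 1 = 0 $$
%   @end tex
% Inside the group set up by \envdef\tex below, \ { } $ & # ^ _ regain
% their plain-TeX catcodes, while @ stays the escape character so that
% `@end tex' is still recognized.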
\envdef\tex{% \catcode `\\=0 \catcode `\{=1 \catcode `\}=2 \catcode `\$=3 \catcode `\&=4 \catcode `\#=6 \catcode `\^=7 \catcode `\_=8 \catcode `\~=\active \let~=\tie \catcode `\%=14 \catcode `\+=\other \catcode `\"=\other \catcode `\|=\other \catcode `\<=\other \catcode `\>=\other \escapechar=`\\ % \let\b=\ptexb \let\bullet=\ptexbullet \let\c=\ptexc \let\,=\ptexcomma \let\.=\ptexdot \let\dots=\ptexdots \let\equiv=\ptexequiv \let\!=\ptexexclam \let\i=\ptexi \let\indent=\ptexindent \let\noindent=\ptexnoindent \let\{=\ptexlbrace \let\+=\tabalign \let\}=\ptexrbrace \let\/=\ptexslash \let\*=\ptexstar \let\t=\ptext \let\frenchspacing=\plainfrenchspacing % \def\endldots{\mathinner{\ldots\ldots\ldots\ldots}}% \def\enddots{\relax\ifmmode\endldots\else$\mathsurround=0pt \endldots\,$\fi}% \def\@{@}% } % There is no need to define \Etex. % Define @lisp ... @end lisp. % @lisp environment forms a group so it can rebind things, % including the definition of @end lisp (which normally is erroneous). % Amount to narrow the margins by for @lisp. \newskip\lispnarrowing \lispnarrowing=0.4in % This is the definition that ^^M gets inside @lisp, @example, and other % such environments. \null is better than a space, since it doesn't % have any width. \def\lisppar{\null\endgraf} % This space is always present above and below environments. \newskip\envskipamount \envskipamount = 0pt % Make spacing and below environment symmetrical. We use \parskip here % to help in doing that, since in @example-like environments \parskip % is reset to zero; thus the \afterenvbreak inserts no space -- but the % start of the next paragraph will insert \parskip. % \def\aboveenvbreak{{% % =10000 instead of <10000 because of a special case in \itemzzz and % \sectionheading, q.v. \ifnum \lastpenalty=10000 \else \advance\envskipamount by \parskip \endgraf \ifdim\lastskip<\envskipamount \removelastskip % it's not a good place to break if the last penalty was \nobreak % or better ... \ifnum\lastpenalty<10000 \penalty-50 \fi \vskip\envskipamount \fi \fi }} \let\afterenvbreak = \aboveenvbreak % \nonarrowing is a flag. If "set", @lisp etc don't narrow margins; it will % also clear it, so that its embedded environments do the narrowing again. \let\nonarrowing=\relax % @cartouche ... @end cartouche: draw rectangle w/rounded corners around % environment contents. \font\circle=lcircle10 \newdimen\circthick \newdimen\cartouter\newdimen\cartinner \newskip\normbskip\newskip\normpskip\newskip\normlskip \circthick=\fontdimen8\circle % \def\ctl{{\circle\char'013\hskip -6pt}}% 6pt from pl file: 1/2charwidth \def\ctr{{\hskip 6pt\circle\char'010}} \def\cbl{{\circle\char'012\hskip -6pt}} \def\cbr{{\hskip 6pt\circle\char'011}} \def\carttop{\hbox to \cartouter{\hskip\lskip \ctl\leaders\hrule height\circthick\hfil\ctr \hskip\rskip}} \def\cartbot{\hbox to \cartouter{\hskip\lskip \cbl\leaders\hrule height\circthick\hfil\cbr \hskip\rskip}} % \newskip\lskip\newskip\rskip \envdef\cartouche{% \ifhmode\par\fi % can't be in the midst of a paragraph. \startsavinginserts \lskip=\leftskip \rskip=\rightskip \leftskip=0pt\rightskip=0pt % we want these *outside*. \cartinner=\hsize \advance\cartinner by-\lskip \advance\cartinner by-\rskip \cartouter=\hsize \advance\cartouter by 18.4pt % allow for 3pt kerns on either % side, and for 6pt waste from % each corner char, and rule thickness \normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip % Flag to tell @lisp, etc., not to narrow margin. 
\let\nonarrowing = t% \vbox\bgroup \baselineskip=0pt\parskip=0pt\lineskip=0pt \carttop \hbox\bgroup \hskip\lskip \vrule\kern3pt \vbox\bgroup \kern3pt \hsize=\cartinner \baselineskip=\normbskip \lineskip=\normlskip \parskip=\normpskip \vskip -\parskip \comment % For explanation, see the end of \def\group. } \def\Ecartouche{% \ifhmode\par\fi \kern3pt \egroup \kern3pt\vrule \hskip\rskip \egroup \cartbot \egroup \checkinserts } % This macro is called at the beginning of all the @example variants, % inside a group. \def\nonfillstart{% \aboveenvbreak \hfuzz = 12pt % Don't be fussy \sepspaces % Make spaces be word-separators rather than space tokens. \let\par = \lisppar % don't ignore blank lines \obeylines % each line of input is a line of output \parskip = 0pt \parindent = 0pt \emergencystretch = 0pt % don't try to avoid overfull boxes \ifx\nonarrowing\relax \advance \leftskip by \lispnarrowing \exdentamount=\lispnarrowing \else \let\nonarrowing = \relax \fi \let\exdent=\nofillexdent } % If you want all examples etc. small: @set dispenvsize small. % If you want even small examples the full size: @set dispenvsize nosmall. % This affects the following displayed environments: % @example, @display, @format, @lisp % \def\smallword{small} \def\nosmallword{nosmall} \let\SETdispenvsize\relax \def\setnormaldispenv{% \ifx\SETdispenvsize\smallword \smallexamplefonts \rm \fi } \def\setsmalldispenv{% \ifx\SETdispenvsize\nosmallword \else \smallexamplefonts \rm \fi } % We often define two environments, @foo and @smallfoo. % Let's do it by one command: \def\makedispenv #1#2{ \expandafter\envdef\csname#1\endcsname {\setnormaldispenv #2} \expandafter\envdef\csname small#1\endcsname {\setsmalldispenv #2} \expandafter\let\csname E#1\endcsname \afterenvbreak \expandafter\let\csname Esmall#1\endcsname \afterenvbreak } % Define two synonyms: \def\maketwodispenvs #1#2#3{ \makedispenv{#1}{#3} \makedispenv{#2}{#3} } % @lisp: indented, narrowed, typewriter font; @example: same as @lisp. % % @smallexample and @smalllisp: use smaller fonts. % Originally contributed by Pavel@xerox. % \maketwodispenvs {lisp}{example}{% \nonfillstart \tt\quoteexpand \let\kbdfont = \kbdexamplefont % Allow @kbd to do something special. \gobble % eat return } % @display/@smalldisplay: same as @lisp except keep current font. % \makedispenv {display}{% \nonfillstart \gobble } % @format/@smallformat: same as @display except don't narrow margins. % \makedispenv{format}{% \let\nonarrowing = t% \nonfillstart \gobble } % @flushleft: same as @format, but doesn't obey \SETdispenvsize. \envdef\flushleft{% \let\nonarrowing = t% \nonfillstart \gobble } \let\Eflushleft = \afterenvbreak % @flushright. % \envdef\flushright{% \let\nonarrowing = t% \nonfillstart \advance\leftskip by 0pt plus 1fill \gobble } \let\Eflushright = \afterenvbreak % @quotation does normal linebreaking (hence we can't use \nonfillstart) % and narrows the margins. We keep \parskip nonzero in general, since % we're doing normal filling. So, when using \aboveenvbreak and % \afterenvbreak, temporarily make \parskip 0. % \envdef\quotation{% {\parskip=0pt \aboveenvbreak}% because \aboveenvbreak inserts \parskip \parindent=0pt % % @cartouche defines \nonarrowing to inhibit narrowing at next level down. \ifx\nonarrowing\relax \advance\leftskip by \lispnarrowing \advance\rightskip by \lispnarrowing \exdentamount = \lispnarrowing \else \let\nonarrowing = \relax \fi \parsearg\quotationlabel } % We have retained a nonzero parskip for the environment, since we're % doing normal filling. 
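% (Editor's illustrative sketch; not part of the original macro file.)
% Typical use of the display environments defined above:
%   @quotation Note
%   Filled, indented running text ...
%   @end quotation
%
%   @example
%   gcc -o foo foo.c
%   @end example
% @quotation keeps normal line filling; @example and @lisp switch to the
% typewriter font, obey line breaks, and narrow the margins by
% \lispnarrowing.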
% \def\Equotation{% \par \ifx\quotationauthor\undefined\else % indent a bit. \leftline{\kern 2\leftskip \sl ---\quotationauthor}% \fi {\parskip=0pt \afterenvbreak}% } % If we're given an argument, typeset it in bold with a colon after. \def\quotationlabel#1{% \def\temp{#1}% \ifx\temp\empty \else {\bf #1: }% \fi } % LaTeX-like @verbatim...@end verbatim and @verb{...} % If we want to allow any as delimiter, % we need the curly braces so that makeinfo sees the @verb command, eg: % `@verbx...x' would look like the '@verbx' command. --janneke@gnu.org % % [Knuth]: Donald Ervin Knuth, 1996. The TeXbook. % % [Knuth] p.344; only we need to do the other characters Texinfo sets % active too. Otherwise, they get lost as the first character on a % verbatim line. \def\dospecials{% \do\ \do\\\do\{\do\}\do\$\do\&% \do\#\do\^\do\^^K\do\_\do\^^A\do\%\do\~% \do\<\do\>\do\|\do\@\do+\do\"% } % % [Knuth] p. 380 \def\uncatcodespecials{% \def\do##1{\catcode`##1=\other}\dospecials} % % [Knuth] pp. 380,381,391 % Disable Spanish ligatures ?` and !` of \tt font \begingroup \catcode`\`=\active\gdef`{\relax\lq} \endgroup % % Setup for the @verb command. % % Eight spaces for a tab \begingroup \catcode`\^^I=\active \gdef\tabeightspaces{\catcode`\^^I=\active\def^^I{\ \ \ \ \ \ \ \ }} \endgroup % \def\setupverb{% \tt % easiest (and conventionally used) font for verbatim \def\par{\leavevmode\endgraf}% \catcode`\`=\active \tabeightspaces % Respect line breaks, % print special symbols as themselves, and % make each space count % must do in this order: \obeylines \uncatcodespecials \sepspaces } % Setup for the @verbatim environment % % Real tab expansion \newdimen\tabw \setbox0=\hbox{\tt\space} \tabw=8\wd0 % tab amount % \def\starttabbox{\setbox0=\hbox\bgroup} % Allow an option to not replace quotes with a regular directed right % quote/apostrophe (char 0x27), but instead use the undirected quote % from cmtt (char 0x0d). The undirected quote is ugly, so don't make it % the default, but it works for pasting with more pdf viewers (at least % evince), the lilypond developers report. xpdf does work with the % regular 0x27. % \def\codequoteright{% \expandafter\ifx\csname SETcodequoteundirected\endcsname\relax '% \else \char'15 \fi } % % and a similar option for the left quote char vs. a grave accent. % Modern fonts display ASCII 0x60 as a grave accent, so some people like % the code environments to do likewise. % \def\codequoteleft{% \expandafter\ifx\csname SETcodequotebacktick\endcsname\relax `% \else \char'22 \fi } % \begingroup \catcode`\^^I=\active \gdef\tabexpand{% \catcode`\^^I=\active \def^^I{\leavevmode\egroup \dimen0=\wd0 % the width so far, or since the previous tab \divide\dimen0 by\tabw \multiply\dimen0 by\tabw % compute previous multiple of \tabw \advance\dimen0 by\tabw % advance to next multiple of \tabw \wd0=\dimen0 \box0 \starttabbox }% } \catcode`\'=\active \gdef\rquoteexpand{\catcode\rquoteChar=\active \def'{\codequoteright}}% % \catcode`\`=\active \gdef\lquoteexpand{\catcode\lquoteChar=\active \def`{\codequoteleft}}% % \gdef\quoteexpand{\rquoteexpand \lquoteexpand}% \endgroup % start the verbatim environment. 
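% (Editor's illustrative sketch; not part of the original macro file.)
% @verb takes an arbitrary delimiter character just inside the braces,
% and @verbatim runs up to the matching @end verbatim:
%   @verb{|braces {} and @commands stay literal here|}
%   @verbatim
%   everything here, including @commands{}, is printed as-is
%   @end verbatim
% The \setupverb and \setupverbatim macros arrange the catcodes, tab
% handling, and quote characters used by these commands.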
\def\setupverbatim{% \let\nonarrowing = t% \nonfillstart % Easiest (and conventionally used) font for verbatim \tt \def\par{\leavevmode\egroup\box0\endgraf}% \catcode`\`=\active \tabexpand \quoteexpand % Respect line breaks, % print special symbols as themselves, and % make each space count % must do in this order: \obeylines \uncatcodespecials \sepspaces \everypar{\starttabbox}% } % Do the @verb magic: verbatim text is quoted by unique % delimiter characters. Before first delimiter expect a % right brace, after last delimiter expect closing brace: % % \def\doverb'{'#1'}'{#1} % % [Knuth] p. 382; only eat outer {} \begingroup \catcode`[=1\catcode`]=2\catcode`\{=\other\catcode`\}=\other \gdef\doverb{#1[\def\next##1#1}[##1\endgroup]\next] \endgroup % \def\verb{\begingroup\setupverb\doverb} % % % Do the @verbatim magic: define the macro \doverbatim so that % the (first) argument ends when '@end verbatim' is reached, ie: % % \def\doverbatim#1@end verbatim{#1} % % For Texinfo it's a lot easier than for LaTeX, % because texinfo's \verbatim doesn't stop at '\end{verbatim}': % we need not redefine '\', '{' and '}'. % % Inspired by LaTeX's verbatim command set [latex.ltx] % \begingroup \catcode`\ =\active \obeylines % % ignore everything up to the first ^^M, that's the newline at the end % of the @verbatim input line itself. Otherwise we get an extra blank % line in the output. \xdef\doverbatim#1^^M#2@end verbatim{#2\noexpand\end\gobble verbatim}% % We really want {...\end verbatim} in the body of the macro, but % without the active space; thus we have to use \xdef and \gobble. \endgroup % \envdef\verbatim{% \setupverbatim\doverbatim } \let\Everbatim = \afterenvbreak % @verbatiminclude FILE - insert text of file in verbatim environment. % \def\verbatiminclude{\parseargusing\filenamecatcodes\doverbatiminclude} % \def\doverbatiminclude#1{% {% \makevalueexpandable \setupverbatim \input #1 \afterenvbreak }% } % @copying ... @end copying. % Save the text away for @insertcopying later. % % We save the uninterpreted tokens, rather than creating a box. % Saving the text in a box would be much easier, but then all the % typesetting commands (@smallbook, font changes, etc.) have to be done % beforehand -- and a) we want @copying to be done first in the source % file; b) letting users define the frontmatter in as flexible order as % possible is very desirable. % \def\copying{\checkenv{}\begingroup\scanargctxt\docopying} \def\docopying#1@end copying{\endgroup\def\copyingtext{#1}} % \def\insertcopying{% \begingroup \parindent = 0pt % paragraph indentation looks wrong on title page \scanexp\copyingtext \endgroup } \message{defuns,} % @defun etc. \newskip\defbodyindent \defbodyindent=.4in \newskip\defargsindent \defargsindent=50pt \newskip\deflastargmargin \deflastargmargin=18pt % Start the processing of @deffn: \def\startdefun{% \ifnum\lastpenalty<10000 \medbreak \else % If there are two @def commands in a row, we'll have a \nobreak, % which is there to keep the function description together with its % header. But if there's nothing but headers, we need to allow a % break somewhere. Check specifically for penalty 10002, inserted % by \defargscommonending, instead of 10000, since the sectioning % commands also insert a nobreak penalty, and we don't want to allow % a break between a section heading and a defun. % \ifnum\lastpenalty=10002 \penalty2000 \fi % % Similarly, after a section heading, do not allow a break. % But do insert the glue. 
\medskip % preceded by discardable penalty, so not a breakpoint \fi % \parindent=0in \advance\leftskip by \defbodyindent \exdentamount=\defbodyindent } \def\dodefunx#1{% % First, check whether we are in the right environment: \checkenv#1% % % As above, allow line break if we have multiple x headers in a row. % It's not a great place, though. \ifnum\lastpenalty=10002 \penalty3000 \fi % % And now, it's time to reuse the body of the original defun: \expandafter\gobbledefun#1% } \def\gobbledefun#1\startdefun{} % \printdefunline \deffnheader{text} % \def\printdefunline#1#2{% \begingroup % call \deffnheader: #1#2 \endheader % common ending: \interlinepenalty = 10000 \advance\rightskip by 0pt plus 1fil \endgraf \nobreak\vskip -\parskip \penalty 10002 % signal to \startdefun and \dodefunx % Some of the @defun-type tags do not enable magic parentheses, % rendering the following check redundant. But we don't optimize. \checkparencounts \endgroup } \def\Edefun{\endgraf\medbreak} % \makedefun{deffn} creates \deffn, \deffnx and \Edeffn; % the only thing remainnig is to define \deffnheader. % \def\makedefun#1{% \expandafter\let\csname E#1\endcsname = \Edefun \edef\temp{\noexpand\domakedefun \makecsname{#1}\makecsname{#1x}\makecsname{#1header}}% \temp } % \domakedefun \deffn \deffnx \deffnheader % % Define \deffn and \deffnx, without parameters. % \deffnheader has to be defined explicitly. % \def\domakedefun#1#2#3{% \envdef#1{% \startdefun \parseargusing\activeparens{\printdefunline#3}% }% \def#2{\dodefunx#1}% \def#3% } %%% Untyped functions: % @deffn category name args \makedefun{deffn}{\deffngeneral{}} % @deffn category class name args \makedefun{defop}#1 {\defopon{#1\ \putwordon}} % \defopon {category on}class name args \def\defopon#1#2 {\deffngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} } % \deffngeneral {subind}category name args % \def\deffngeneral#1#2 #3 #4\endheader{% % Remember that \dosubind{fn}{foo}{} is equivalent to \doind{fn}{foo}. 
\dosubind{fn}{\code{#3}}{#1}% \defname{#2}{}{#3}\magicamp\defunargs{#4\unskip}% } %%% Typed functions: % @deftypefn category type name args \makedefun{deftypefn}{\deftypefngeneral{}} % @deftypeop category class type name args \makedefun{deftypeop}#1 {\deftypeopon{#1\ \putwordon}} % \deftypeopon {category on}class type name args \def\deftypeopon#1#2 {\deftypefngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} } % \deftypefngeneral {subind}category type name args % \def\deftypefngeneral#1#2 #3 #4 #5\endheader{% \dosubind{fn}{\code{#4}}{#1}% \defname{#2}{#3}{#4}\defunargs{#5\unskip}% } %%% Typed variables: % @deftypevr category type var args \makedefun{deftypevr}{\deftypecvgeneral{}} % @deftypecv category class type var args \makedefun{deftypecv}#1 {\deftypecvof{#1\ \putwordof}} % \deftypecvof {category of}class type var args \def\deftypecvof#1#2 {\deftypecvgeneral{\putwordof\ \code{#2}}{#1\ \code{#2}} } % \deftypecvgeneral {subind}category type var args % \def\deftypecvgeneral#1#2 #3 #4 #5\endheader{% \dosubind{vr}{\code{#4}}{#1}% \defname{#2}{#3}{#4}\defunargs{#5\unskip}% } %%% Untyped variables: % @defvr category var args \makedefun{defvr}#1 {\deftypevrheader{#1} {} } % @defcv category class var args \makedefun{defcv}#1 {\defcvof{#1\ \putwordof}} % \defcvof {category of}class var args \def\defcvof#1#2 {\deftypecvof{#1}#2 {} } %%% Type: % @deftp category name args \makedefun{deftp}#1 #2 #3\endheader{% \doind{tp}{\code{#2}}% \defname{#1}{}{#2}\defunargs{#3\unskip}% } % Remaining @defun-like shortcuts: \makedefun{defun}{\deffnheader{\putwordDeffunc} } \makedefun{defmac}{\deffnheader{\putwordDefmac} } \makedefun{defspec}{\deffnheader{\putwordDefspec} } \makedefun{deftypefun}{\deftypefnheader{\putwordDeffunc} } \makedefun{defvar}{\defvrheader{\putwordDefvar} } \makedefun{defopt}{\defvrheader{\putwordDefopt} } \makedefun{deftypevar}{\deftypevrheader{\putwordDefvar} } \makedefun{defmethod}{\defopon\putwordMethodon} \makedefun{deftypemethod}{\deftypeopon\putwordMethodon} \makedefun{defivar}{\defcvof\putwordInstanceVariableof} \makedefun{deftypeivar}{\deftypecvof\putwordInstanceVariableof} % \defname, which formats the name of the @def (not the args). % #1 is the category, such as "Function". % #2 is the return type, if any. % #3 is the function name. % % We are followed by (but not passed) the arguments, if any. % \def\defname#1#2#3{% % Get the values of \leftskip and \rightskip as they were outside the @def... \advance\leftskip by -\defbodyindent % % How we'll format the type name. Putting it in brackets helps % distinguish it from the body text that may end up on the next line % just below it. \def\temp{#1}% \setbox0=\hbox{\kern\deflastargmargin \ifx\temp\empty\else [\rm\temp]\fi} % % Figure out line sizes for the paragraph shape. % The first line needs space for \box0; but if \rightskip is nonzero, % we need only space for the part of \box0 which exceeds it: \dimen0=\hsize \advance\dimen0 by -\wd0 \advance\dimen0 by \rightskip % The continuations: \dimen2=\hsize \advance\dimen2 by -\defargsindent % (plain.tex says that \dimen1 should be used only as global.) \parshape 2 0in \dimen0 \defargsindent \dimen2 % % Put the type name to the right margin. \noindent \hbox to 0pt{% \hfil\box0 \kern-\hsize % \hsize has to be shortened this way: \kern\leftskip % Intentionally do not respect \rightskip, since we need the space. }% % % Allow all lines to be underfull without complaint: \tolerance=10000 \hbadness=10000 \exdentamount=\defbodyindent {% % defun fonts. 
We use typewriter by default (used to be bold) because: % . we're printing identifiers, they should be in tt in principle. % . in languages with many accents, such as Czech or French, it's % common to leave accents off identifiers. The result looks ok in % tt, but exceedingly strange in rm. % . we don't want -- and --- to be treated as ligatures. % . this still does not fix the ?` and !` ligatures, but so far no % one has made identifiers using them :). \df \tt \def\temp{#2}% return value type \ifx\temp\empty\else \tclose{\temp} \fi #3% output function name }% {\rm\enskip}% hskip 0.5 em of \tenrm % \boldbrax % arguments will be output next, if any. } % Print arguments in slanted roman (not ttsl), inconsistently with using % tt for the name. This is because literal text is sometimes needed in % the argument list (groff manual), and ttsl and tt are not very % distinguishable. Prevent hyphenation at `-' chars. % \def\defunargs#1{% % use sl by default (not ttsl), % tt for the names. \df \sl \hyphenchar\font=0 % % On the other hand, if an argument has two dashes (for instance), we % want a way to get ttsl. Let's try @var for that. \let\var=\ttslanted #1% \sl\hyphenchar\font=45 } % We want ()&[] to print specially on the defun line. % \def\activeparens{% \catcode`\(=\active \catcode`\)=\active \catcode`\[=\active \catcode`\]=\active \catcode`\&=\active } % Make control sequences which act like normal parenthesis chars. \let\lparen = ( \let\rparen = ) % Be sure that we always have a definition for `(', etc. For example, % if the fn name has parens in it, \boldbrax will not be in effect yet, % so TeX would otherwise complain about undefined control sequence. { \activeparens \global\let(=\lparen \global\let)=\rparen \global\let[=\lbrack \global\let]=\rbrack \global\let& = \& \gdef\boldbrax{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb} \gdef\magicamp{\let&=\amprm} } \newcount\parencount % If we encounter &foo, then turn on ()-hacking afterwards \newif\ifampseen \def\amprm#1 {\ampseentrue{\bf\ }} \def\parenfont{% \ifampseen % At the first level, print parens in roman, % otherwise use the default font. \ifnum \parencount=1 \rm \fi \else % The \sf parens (in \boldbrax) actually are a little bolder than % the contained text. This is especially needed for [ and ] . \sf \fi } \def\infirstlevel#1{% \ifampseen \ifnum\parencount=1 #1% \fi \fi } \def\bfafterword#1 {#1 \bf} \def\opnr{% \global\advance\parencount by 1 {\parenfont(}% \infirstlevel \bfafterword } \def\clnr{% {\parenfont)}% \infirstlevel \sl \global\advance\parencount by -1 } \newcount\brackcount \def\lbrb{% \global\advance\brackcount by 1 {\bf[}% } \def\rbrb{% {\bf]}% \global\advance\brackcount by -1 } \def\checkparencounts{% \ifnum\parencount=0 \else \badparencount \fi \ifnum\brackcount=0 \else \badbrackcount \fi } \def\badparencount{% \errmessage{Unbalanced parentheses in @def}% \global\parencount=0 } \def\badbrackcount{% \errmessage{Unbalanced square braces in @def}% \global\brackcount=0 } \message{macros,} % @macro. % To do this right we need a feature of e-TeX, \scantokens, % which we arrange to emulate with a temporary file in ordinary TeX. 
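% (Editor's illustrative sketch; not part of the original macro file.)
% A user-level macro definition and call look like
%   @macro arity{name, count}
%   @code{\name\} takes \count\ arguments.
%   @end macro
%   @arity{lookup, two}
% Parameters are written \param\ inside the body (see \mbodybackslash
% below); when the macro is used, the saved body is rescanned by
% \scanmacro, whose e-TeX / temporary-file implementation follows.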
\ifx\eTeXversion\undefined \newwrite\macscribble \def\scantokens#1{% \toks0={#1}% \immediate\openout\macscribble=\jobname.tmp \immediate\write\macscribble{\the\toks0}% \immediate\closeout\macscribble \input \jobname.tmp } \fi \def\scanmacro#1{% \begingroup \newlinechar`\^^M \let\xeatspaces\eatspaces % Undo catcode changes of \startcontents and \doprintindex % When called from @insertcopying or (short)caption, we need active % backslash to get it printed correctly. Previously, we had % \catcode`\\=\other instead. We'll see whether a problem appears % with macro expansion. --kasal, 19aug04 \catcode`\@=0 \catcode`\\=\active \escapechar=`\@ % ... and \example \spaceisspace % % Append \endinput to make sure that TeX does not see the ending newline. % I've verified that it is necessary both for e-TeX and for ordinary TeX % --kasal, 29nov03 \scantokens{#1\endinput}% \endgroup } \def\scanexp#1{% \edef\temp{\noexpand\scanmacro{#1}}% \temp } \newcount\paramno % Count of parameters \newtoks\macname % Macro name \newif\ifrecursive % Is it recursive? % List of all defined macros in the form % \definedummyword\macro1\definedummyword\macro2... % Currently is also contains all @aliases; the list can be split % if there is a need. \def\macrolist{} % Add the macro to \macrolist \def\addtomacrolist#1{\expandafter \addtomacrolistxxx \csname#1\endcsname} \def\addtomacrolistxxx#1{% \toks0 = \expandafter{\macrolist\definedummyword#1}% \xdef\macrolist{\the\toks0}% } % Utility routines. % This does \let #1 = #2, with \csnames; that is, % \let \csname#1\endcsname = \csname#2\endcsname % (except of course we have to play expansion games). % \def\cslet#1#2{% \expandafter\let \csname#1\expandafter\endcsname \csname#2\endcsname } % Trim leading and trailing spaces off a string. % Concepts from aro-bend problem 15 (see CTAN). {\catcode`\@=11 \gdef\eatspaces #1{\expandafter\trim@\expandafter{#1 }} \gdef\trim@ #1{\trim@@ @#1 @ #1 @ @@} \gdef\trim@@ #1@ #2@ #3@@{\trim@@@\empty #2 @} \def\unbrace#1{#1} \unbrace{\gdef\trim@@@ #1 } #2@{#1} } % Trim a single trailing ^^M off a string. {\catcode`\^^M=\other \catcode`\Q=3% \gdef\eatcr #1{\eatcra #1Q^^MQ}% \gdef\eatcra#1^^MQ{\eatcrb#1Q}% \gdef\eatcrb#1Q#2Q{#1}% } % Macro bodies are absorbed as an argument in a context where % all characters are catcode 10, 11 or 12, except \ which is active % (as in normal texinfo). It is necessary to change the definition of \. % It's necessary to have hard CRs when the macro is executed. This is % done by making ^^M (\endlinechar) catcode 12 when reading the macro % body, and then making it the \newlinechar in \scanmacro. \def\scanctxt{% \catcode`\"=\other \catcode`\+=\other \catcode`\<=\other \catcode`\>=\other \catcode`\@=\other \catcode`\^=\other \catcode`\_=\other \catcode`\|=\other \catcode`\~=\other } \def\scanargctxt{% \scanctxt \catcode`\\=\other \catcode`\^^M=\other } \def\macrobodyctxt{% \scanctxt \catcode`\{=\other \catcode`\}=\other \catcode`\^^M=\other \usembodybackslash } \def\macroargctxt{% \scanctxt \catcode`\\=\other } % \mbodybackslash is the definition of \ in @macro bodies. % It maps \foo\ => \csname macarg.foo\endcsname => #N % where N is the macro parameter number. % We define \csname macarg.\endcsname to be \realbackslash, so % \\ in macro replacement text gets you a backslash. 
{\catcode`@=0 @catcode`@\=@active @gdef@usembodybackslash{@let\=@mbodybackslash} @gdef@mbodybackslash#1\{@csname macarg.#1@endcsname} } \expandafter\def\csname macarg.\endcsname{\realbackslash} \def\macro{\recursivefalse\parsearg\macroxxx} \def\rmacro{\recursivetrue\parsearg\macroxxx} \def\macroxxx#1{% \getargs{#1}% now \macname is the macname and \argl the arglist \ifx\argl\empty % no arguments \paramno=0% \else \expandafter\parsemargdef \argl;% \fi \if1\csname ismacro.\the\macname\endcsname \message{Warning: redefining \the\macname}% \else \expandafter\ifx\csname \the\macname\endcsname \relax \else \errmessage{Macro name \the\macname\space already defined}\fi \global\cslet{macsave.\the\macname}{\the\macname}% \global\expandafter\let\csname ismacro.\the\macname\endcsname=1% \addtomacrolist{\the\macname}% \fi \begingroup \macrobodyctxt \ifrecursive \expandafter\parsermacbody \else \expandafter\parsemacbody \fi} \parseargdef\unmacro{% \if1\csname ismacro.#1\endcsname \global\cslet{#1}{macsave.#1}% \global\expandafter\let \csname ismacro.#1\endcsname=0% % Remove the macro name from \macrolist: \begingroup \expandafter\let\csname#1\endcsname \relax \let\definedummyword\unmacrodo \xdef\macrolist{\macrolist}% \endgroup \else \errmessage{Macro #1 not defined}% \fi } % Called by \do from \dounmacro on each macro. The idea is to omit any % macro definitions that have been changed to \relax. % \def\unmacrodo#1{% \ifx #1\relax % remove this \else \noexpand\definedummyword \noexpand#1% \fi } % This makes use of the obscure feature that if the last token of a % is #, then the preceding argument is delimited by % an opening brace, and that opening brace is not consumed. \def\getargs#1{\getargsxxx#1{}} \def\getargsxxx#1#{\getmacname #1 \relax\getmacargs} \def\getmacname #1 #2\relax{\macname={#1}} \def\getmacargs#1{\def\argl{#1}} % Parse the optional {params} list. Set up \paramno and \paramlist % so \defmacro knows what to do. Define \macarg.blah for each blah % in the params list, to be ##N where N is the position in that list. % That gets used by \mbodybackslash (above). % We need to get `macro parameter char #' into several definitions. % The technique used is stolen from LaTeX: let \hash be something % unexpandable, insert that wherever you need a #, and then redefine % it to # just before using the token list produced. % % The same technique is used to protect \eatspaces till just before % the macro is used. \def\parsemargdef#1;{\paramno=0\def\paramlist{}% \let\hash\relax\let\xeatspaces\relax\parsemargdefxxx#1,;,} \def\parsemargdefxxx#1,{% \if#1;\let\next=\relax \else \let\next=\parsemargdefxxx \advance\paramno by 1% \expandafter\edef\csname macarg.\eatspaces{#1}\endcsname {\xeatspaces{\hash\the\paramno}}% \edef\paramlist{\paramlist\hash\the\paramno,}% \fi\next} % These two commands read recursive and nonrecursive macro bodies. % (They're different since rec and nonrec macros end differently.) \long\def\parsemacbody#1@end macro% {\xdef\temp{\eatcr{#1}}\endgroup\defmacro}% \long\def\parsermacbody#1@end rmacro% {\xdef\temp{\eatcr{#1}}\endgroup\defmacro}% % This defines the macro itself. There are six cases: recursive and % nonrecursive macros of zero, one, and many arguments. % Much magic with \expandafter here. % \xdef is used so that macro definitions will survive the file % they're defined in; @include reads the file inside a group. 
\def\defmacro{% \let\hash=##% convert placeholders to macro parameter chars \ifrecursive \ifcase\paramno % 0 \expandafter\xdef\csname\the\macname\endcsname{% \noexpand\scanmacro{\temp}}% \or % 1 \expandafter\xdef\csname\the\macname\endcsname{% \bgroup\noexpand\macroargctxt \noexpand\braceorline \expandafter\noexpand\csname\the\macname xxx\endcsname}% \expandafter\xdef\csname\the\macname xxx\endcsname##1{% \egroup\noexpand\scanmacro{\temp}}% \else % many \expandafter\xdef\csname\the\macname\endcsname{% \bgroup\noexpand\macroargctxt \noexpand\csname\the\macname xx\endcsname}% \expandafter\xdef\csname\the\macname xx\endcsname##1{% \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}% \expandafter\expandafter \expandafter\xdef \expandafter\expandafter \csname\the\macname xxx\endcsname \paramlist{\egroup\noexpand\scanmacro{\temp}}% \fi \else \ifcase\paramno % 0 \expandafter\xdef\csname\the\macname\endcsname{% \noexpand\norecurse{\the\macname}% \noexpand\scanmacro{\temp}\egroup}% \or % 1 \expandafter\xdef\csname\the\macname\endcsname{% \bgroup\noexpand\macroargctxt \noexpand\braceorline \expandafter\noexpand\csname\the\macname xxx\endcsname}% \expandafter\xdef\csname\the\macname xxx\endcsname##1{% \egroup \noexpand\norecurse{\the\macname}% \noexpand\scanmacro{\temp}\egroup}% \else % many \expandafter\xdef\csname\the\macname\endcsname{% \bgroup\noexpand\macroargctxt \expandafter\noexpand\csname\the\macname xx\endcsname}% \expandafter\xdef\csname\the\macname xx\endcsname##1{% \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}% \expandafter\expandafter \expandafter\xdef \expandafter\expandafter \csname\the\macname xxx\endcsname \paramlist{% \egroup \noexpand\norecurse{\the\macname}% \noexpand\scanmacro{\temp}\egroup}% \fi \fi} \def\norecurse#1{\bgroup\cslet{#1}{macsave.#1}} % \braceorline decides whether the next nonwhitespace character is a % {. If so it reads up to the closing }, if not, it reads the whole % line. Whatever was read is then fed to the next control sequence % as an argument (by \parsebrace or \parsearg) \def\braceorline#1{\let\macnamexxx=#1\futurelet\nchar\braceorlinexxx} \def\braceorlinexxx{% \ifx\nchar\bgroup\else \expandafter\parsearg \fi \macnamexxx} % @alias. % We need some trickery to remove the optional spaces around the equal % sign. Just make them active and then expand them all to nothing. \def\alias{\parseargusing\obeyspaces\aliasxxx} \def\aliasxxx #1{\aliasyyy#1\relax} \def\aliasyyy #1=#2\relax{% {% \expandafter\let\obeyedspace=\empty \addtomacrolist{#1}% \xdef\next{\global\let\makecsname{#1}=\makecsname{#2}}% }% \next } \message{cross references,} \newwrite\auxfile \newif\ifhavexrefs % True if xref values are known. \newif\ifwarnedxrefs % True if we warned once that they aren't known. % @inforef is relatively simple. \def\inforef #1{\inforefzzz #1,,,,**} \def\inforefzzz #1,#2,#3,#4**{\putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}}, node \samp{\ignorespaces#1{}}} % @node's only job in TeX is to define \lastnode, which is used in % cross-references. The @node line might or might not have commas, and % might or might not have spaces before the first comma, like: % @node foo , bar , ... % We don't want such trailing spaces in the node name. 
% \parseargdef\node{\checkenv{}\donode #1 ,\finishnodeparse} % % also remove a trailing comma, in case of something like this: % @node Help-Cross, , , Cross-refs \def\donode#1 ,#2\finishnodeparse{\dodonode #1,\finishnodeparse} \def\dodonode#1,#2\finishnodeparse{\gdef\lastnode{#1}} \let\nwnode=\node \let\lastnode=\empty % Write a cross-reference definition for the current node. #1 is the % type (Ynumbered, Yappendix, Ynothing). % \def\donoderef#1{% \ifx\lastnode\empty\else \setref{\lastnode}{#1}% \global\let\lastnode=\empty \fi } % @anchor{NAME} -- define xref target at arbitrary point. % \newcount\savesfregister % \def\savesf{\relax \ifhmode \savesfregister=\spacefactor \fi} \def\restoresf{\relax \ifhmode \spacefactor=\savesfregister \fi} \def\anchor#1{\savesf \setref{#1}{Ynothing}\restoresf \ignorespaces} % \setref{NAME}{SNT} defines a cross-reference point NAME (a node or an % anchor), which consists of three parts: % 1) NAME-title - the current sectioning name taken from \thissection, % or the anchor name. % 2) NAME-snt - section number and type, passed as the SNT arg, or % empty for anchors. % 3) NAME-pg - the page number. % % This is called from \donoderef, \anchor, and \dofloat. In the case of % floats, there is an additional part, which is not written here: % 4) NAME-lof - the text as it should appear in a @listoffloats. % \def\setref#1#2{% \pdfmkdest{#1}% \iflinks {% \atdummies % preserve commands, but don't expand them \edef\writexrdef##1##2{% \write\auxfile{@xrdef{#1-% #1 of \setref, expanded by the \edef ##1}{##2}}% these are parameters of \writexrdef }% \toks0 = \expandafter{\thissection}% \immediate \writexrdef{title}{\the\toks0 }% \immediate \writexrdef{snt}{\csname #2\endcsname}% \Ynumbered etc. \writexrdef{pg}{\folio}% will be written later, during \shipout }% \fi } % @xref, @pxref, and @ref generate cross-references. For \xrefX, #1 is % the node name, #2 the name of the Info cross-reference, #3 the printed % node name, #4 the name of the Info file, #5 the name of the printed % manual. All but the node name can be omitted. % \def\pxref#1{\putwordsee{} \xrefX[#1,,,,,,,]} \def\xref#1{\putwordSee{} \xrefX[#1,,,,,,,]} \def\ref#1{\xrefX[#1,,,,,,,]} \def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup \unsepspaces \def\printedmanual{\ignorespaces #5}% \def\printedrefname{\ignorespaces #3}% \setbox1=\hbox{\printedmanual\unskip}% \setbox0=\hbox{\printedrefname\unskip}% \ifdim \wd0 = 0pt % No printed node name was explicitly given. \expandafter\ifx\csname SETxref-automatic-section-title\endcsname\relax % Use the node name inside the square brackets. \def\printedrefname{\ignorespaces #1}% \else % Use the actual chapter/section title appear inside % the square brackets. Use the real section title if we have it. \ifdim \wd1 > 0pt % It is in another manual, so we don't have it. \def\printedrefname{\ignorespaces #1}% \else \ifhavexrefs % We know the real title if we have the xref values. \def\printedrefname{\refx{#1-title}{}}% \else % Otherwise just copy the Info node name. \def\printedrefname{\ignorespaces #1}% \fi% \fi \fi \fi % % Make link in pdf output. \ifpdf \leavevmode \getfilename{#4}% {\turnoffactive % See comments at \activebackslashdouble. 
{\activebackslashdouble \xdef\pdfxrefdest{#1}% \backslashparens\pdfxrefdest}% % \ifnum\filenamelength>0 \startlink attr{/Border [0 0 0]}% goto file{\the\filename.pdf} name{\pdfxrefdest}% \else \startlink attr{/Border [0 0 0]}% goto name{\pdfmkpgn{\pdfxrefdest}}% \fi }% \linkcolor \fi % % Float references are printed completely differently: "Figure 1.2" % instead of "[somenode], p.3". We distinguish them by the % LABEL-title being set to a magic string. {% % Have to otherify everything special to allow the \csname to % include an _ in the xref name, etc. \indexnofonts \turnoffactive \expandafter\global\expandafter\let\expandafter\Xthisreftitle \csname XR#1-title\endcsname }% \iffloat\Xthisreftitle % If the user specified the print name (third arg) to the ref, % print it instead of our usual "Figure 1.2". \ifdim\wd0 = 0pt \refx{#1-snt}{}% \else \printedrefname \fi % % if the user also gave the printed manual name (fifth arg), append % "in MANUALNAME". \ifdim \wd1 > 0pt \space \putwordin{} \cite{\printedmanual}% \fi \else % node/anchor (non-float) references. % % If we use \unhbox0 and \unhbox1 to print the node names, TeX does not % insert empty discretionaries after hyphens, which means that it will % not find a line break at a hyphen in a node names. Since some manuals % are best written with fairly long node names, containing hyphens, this % is a loss. Therefore, we give the text of the node name again, so it % is as if TeX is seeing it for the first time. \ifdim \wd1 > 0pt \putwordsection{} ``\printedrefname'' \putwordin{} \cite{\printedmanual}% \else % _ (for example) has to be the character _ for the purposes of the % control sequence corresponding to the node, but it has to expand % into the usual \leavevmode...\vrule stuff for purposes of % printing. So we \turnoffactive for the \refx-snt, back on for the % printing, back off for the \refx-pg. {\turnoffactive % Only output a following space if the -snt ref is nonempty; for % @unnumbered and @anchor, it won't be. \setbox2 = \hbox{\ignorespaces \refx{#1-snt}{}}% \ifdim \wd2 > 0pt \refx{#1-snt}\space\fi }% % output the `[mynode]' via a macro so it can be overridden. \xrefprintnodename\printedrefname % % But we always want a comma and a space: ,\space % % output the `page 3'. \turnoffactive \putwordpage\tie\refx{#1-pg}{}% \fi \fi \endlink \endgroup} % This macro is called from \xrefX for the `[nodename]' part of xref % output. It's a separate macro only so it can be changed more easily, % since square brackets don't work well in some documents. Particularly % one that Bob is working on :). % \def\xrefprintnodename#1{[#1]} % Things referred to by \setref. % \def\Ynothing{} \def\Yomitfromtoc{} \def\Ynumbered{% \ifnum\secno=0 \putwordChapter@tie \the\chapno \else \ifnum\subsecno=0 \putwordSection@tie \the\chapno.\the\secno \else \ifnum\subsubsecno=0 \putwordSection@tie \the\chapno.\the\secno.\the\subsecno \else \putwordSection@tie \the\chapno.\the\secno.\the\subsecno.\the\subsubsecno \fi\fi\fi } \def\Yappendix{% \ifnum\secno=0 \putwordAppendix@tie @char\the\appendixno{}% \else \ifnum\subsecno=0 \putwordSection@tie @char\the\appendixno.\the\secno \else \ifnum\subsubsecno=0 \putwordSection@tie @char\the\appendixno.\the\secno.\the\subsecno \else \putwordSection@tie @char\the\appendixno.\the\secno.\the\subsecno.\the\subsubsecno \fi\fi\fi } % Define \refx{NAME}{SUFFIX} to reference a cross-reference string named NAME. % If its value is nonempty, SUFFIX is output afterward. 
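% (Editor's illustrative sketch; not part of the original macro file.)
% Cross references are written in the Texinfo source as
%   @xref{Node Name}.
%   @pxref{Node Name, , Printed Title}
%   @ref{Node Name, , , other-manual, The Other Manual}
% \xrefX above typesets these as `See [Node Name], page 3' (or, for a
% float label, as `Figure 1.2'), looking up the title, section number,
% and page via the \refx values defined just below.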
% \def\refx#1#2{% {% \indexnofonts \otherbackslash \expandafter\global\expandafter\let\expandafter\thisrefX \csname XR#1\endcsname }% \ifx\thisrefX\relax % If not defined, say something at least. \angleleft un\-de\-fined\angleright \iflinks \ifhavexrefs \message{\linenumber Undefined cross reference `#1'.}% \else \ifwarnedxrefs\else \global\warnedxrefstrue \message{Cross reference values unknown; you must run TeX again.}% \fi \fi \fi \else % It's defined, so just use it. \thisrefX \fi #2% Output the suffix in any case. } % This is the macro invoked by entries in the aux file. Usually it's % just a \def (we prepend XR to the control sequence name to avoid % collisions). But if this is a float type, we have more work to do. % \def\xrdef#1#2{% \expandafter\gdef\csname XR#1\endcsname{#2}% remember this xref value. % % Was that xref control sequence that we just defined for a float? \expandafter\iffloat\csname XR#1\endcsname % it was a float, and we have the (safe) float type in \iffloattype. \expandafter\let\expandafter\floatlist \csname floatlist\iffloattype\endcsname % % Is this the first time we've seen this float type? \expandafter\ifx\floatlist\relax \toks0 = {\do}% yes, so just \do \else % had it before, so preserve previous elements in list. \toks0 = \expandafter{\floatlist\do}% \fi % % Remember this xref in the control sequence \floatlistFLOATTYPE, % for later use in \listoffloats. \expandafter\xdef\csname floatlist\iffloattype\endcsname{\the\toks0{#1}}% \fi } % Read the last existing aux file, if any. No error if none exists. % \def\tryauxfile{% \openin 1 \jobname.aux \ifeof 1 \else \readdatafile{aux}% \global\havexrefstrue \fi \closein 1 } \def\setupdatafile{% \catcode`\^^@=\other \catcode`\^^A=\other \catcode`\^^B=\other \catcode`\^^C=\other \catcode`\^^D=\other \catcode`\^^E=\other \catcode`\^^F=\other \catcode`\^^G=\other \catcode`\^^H=\other \catcode`\^^K=\other \catcode`\^^L=\other \catcode`\^^N=\other \catcode`\^^P=\other \catcode`\^^Q=\other \catcode`\^^R=\other \catcode`\^^S=\other \catcode`\^^T=\other \catcode`\^^U=\other \catcode`\^^V=\other \catcode`\^^W=\other \catcode`\^^X=\other \catcode`\^^Z=\other \catcode`\^^[=\other \catcode`\^^\=\other \catcode`\^^]=\other \catcode`\^^^=\other \catcode`\^^_=\other % It was suggested to set the catcode of ^ to 7, which would allow ^^e4 etc. % in xref tags, i.e., node names. But since ^^e4 notation isn't % supported in the main text, it doesn't seem desirable. Furthermore, % that is not enough: for node names that actually contain a ^ % character, we would end up writing a line like this: 'xrdef {'hat % b-title}{'hat b} and \xrdef does a \csname...\endcsname on the first % argument, and \hat is not an expandable control sequence. It could % all be worked out, but why? Either we support ^^ or we don't. % % The other change necessary for this was to define \auxhat: % \def\auxhat{\def^{'hat }}% extra space so ok if followed by letter % and then to call \auxhat in \setq. % \catcode`\^=\other % % Special characters. Should be turned off anyway, but... \catcode`\~=\other \catcode`\[=\other \catcode`\]=\other \catcode`\"=\other \catcode`\_=\other \catcode`\|=\other \catcode`\<=\other \catcode`\>=\other \catcode`\$=\other \catcode`\#=\other \catcode`\&=\other \catcode`\%=\other \catcode`+=\other % avoid \+ for paranoia even though we've turned it off % % This is to support \ in node names and titles, since the \ % characters end up in a \csname. It's easier than % leaving it active and making its active definition an actual \ % character. 
What I don't understand is why it works in the *value* % of the xrdef. Seems like it should be a catcode12 \, and that % should not typeset properly. But it works, so I'm moving on for % now. --karl, 15jan04. \catcode`\\=\other % % Make the characters 128-255 be printing characters. {% \count1=128 \def\loop{% \catcode\count1=\other \advance\count1 by 1 \ifnum \count1<256 \loop \fi }% }% % % @ is our escape character in .aux files, and we need braces. \catcode`\{=1 \catcode`\}=2 \catcode`\@=0 } \def\readdatafile#1{% \begingroup \setupdatafile \input\jobname.#1 \endgroup} \message{insertions,} % including footnotes. \newcount \footnoteno % The trailing space in the following definition for supereject is % vital for proper filling; pages come out unaligned when you do a % pagealignmacro call if that space before the closing brace is % removed. (Generally, numeric constants should always be followed by a % space to prevent strange expansion errors.) \def\supereject{\par\penalty -20000\footnoteno =0 } % @footnotestyle is meaningful for info output only. \let\footnotestyle=\comment {\catcode `\@=11 % % Auto-number footnotes. Otherwise like plain. \gdef\footnote{% \let\indent=\ptexindent \let\noindent=\ptexnoindent \global\advance\footnoteno by \@ne \edef\thisfootno{$^{\the\footnoteno}$}% % % In case the footnote comes at the end of a sentence, preserve the % extra spacing after we do the footnote number. \let\@sf\empty \ifhmode\edef\@sf{\spacefactor\the\spacefactor}\ptexslash\fi % % Remove inadvertent blank space before typesetting the footnote number. \unskip \thisfootno\@sf \dofootnote }% % Don't bother with the trickery in plain.tex to not require the % footnote text as a parameter. Our footnotes don't need to be so general. % % Oh yes, they do; otherwise, @ifset (and anything else that uses % \parseargline) fails inside footnotes because the tokens are fixed when % the footnote is read. --karl, 16nov96. % \gdef\dofootnote{% \insert\footins\bgroup % We want to typeset this text as a normal paragraph, even if the % footnote reference occurs in (for example) a display environment. % So reset some parameters. \hsize=\pagewidth \interlinepenalty\interfootnotelinepenalty \splittopskip\ht\strutbox % top baseline for broken footnotes \splitmaxdepth\dp\strutbox \floatingpenalty\@MM \leftskip\z@skip \rightskip\z@skip \spaceskip\z@skip \xspaceskip\z@skip \parindent\defaultparindent % \smallfonts \rm % % Because we use hanging indentation in footnotes, a @noindent appears % to exdent this text, so make it be a no-op. makeinfo does not use % hanging indentation so @noindent can still be needed within footnote % text after an @example or the like (not that this is good style). \let\noindent = \relax % % Hang the footnote text off the number. Use \everypar in case the % footnote extends for more than one paragraph. \everypar = {\hang}% \textindent{\thisfootno}% % % Don't crash into the line above the footnote text. Since this % expands into a box, it must come within the paragraph, lest it % provide a place where TeX can split the footnote. \footstrut \futurelet\next\fo@t } }%end \catcode `\@=11 % In case a @footnote appears in a vbox, save the footnote text and create % the real \insert just after the vbox finished. Otherwise, the insertion % would be lost. % Similarily, if a @footnote appears inside an alignment, save the footnote % text to a box and make the \insert when a row of the table is finished. % And the same can be done for other insert classes. --kasal, 16nov03. 
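% (Editor's illustrative sketch; not part of the original macro file.)
% A footnote is written inline in the Texinfo source:
%   ... as described earlier.@footnote{See the full manual for details.}
% The \footnote macro above auto-increments \footnoteno, typesets the
% reference mark as a superscript number, and hangs the footnote text
% off that number at the bottom of the page.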
% Replace the \insert primitive by a cheating macro. % Deeper inside, just make sure that the saved insertions are not spilled % out prematurely. % \def\startsavinginserts{% \ifx \insert\ptexinsert \let\insert\saveinsert \else \let\checkinserts\relax \fi } % This \insert replacement works for both \insert\footins{foo} and % \insert\footins\bgroup foo\egroup, but it doesn't work for \insert27{foo}. % \def\saveinsert#1{% \edef\next{\noexpand\savetobox \makeSAVEname#1}% \afterassignment\next % swallow the left brace \let\temp = } \def\makeSAVEname#1{\makecsname{SAVE\expandafter\gobble\string#1}} \def\savetobox#1{\global\setbox#1 = \vbox\bgroup \unvbox#1} \def\checksaveins#1{\ifvoid#1\else \placesaveins#1\fi} \def\placesaveins#1{% \ptexinsert \csname\expandafter\gobblesave\string#1\endcsname {\box#1}% } % eat @SAVE -- beware, all of them have catcode \other: { \def\dospecials{\do S\do A\do V\do E} \uncatcodespecials % ;-) \gdef\gobblesave @SAVE{} } % initialization: \def\newsaveins #1{% \edef\next{\noexpand\newsaveinsX \makeSAVEname#1}% \next } \def\newsaveinsX #1{% \csname newbox\endcsname #1% \expandafter\def\expandafter\checkinserts\expandafter{\checkinserts \checksaveins #1}% } % initialize: \let\checkinserts\empty \newsaveins\footins \newsaveins\margin % @image. We use the macros from epsf.tex to support this. % If epsf.tex is not installed and @image is used, we complain. % % Check for and read epsf.tex up front. If we read it only at @image % time, we might be inside a group, and then its definitions would get % undone and the next image would fail. \openin 1 = epsf.tex \ifeof 1 \else % Do not bother showing banner with epsf.tex v2.7k (available in % doc/epsf.tex and on ctan). \def\epsfannounce{\toks0 = }% \input epsf.tex \fi \closein 1 % % We will only complain once about lack of epsf.tex. \newif\ifwarnednoepsf \newhelp\noepsfhelp{epsf.tex must be installed for images to work. It is also included in the Texinfo distribution, or you can get it from ftp://tug.org/tex/epsf.tex.} % \def\image#1{% \ifx\epsfbox\undefined \ifwarnednoepsf \else \errhelp = \noepsfhelp \errmessage{epsf.tex not found, images will be ignored}% \global\warnednoepsftrue \fi \else \imagexxx #1,,,,,\finish \fi } % % Arguments to @image: % #1 is (mandatory) image filename; we tack on .eps extension. % #2 is (optional) width, #3 is (optional) height. % #4 is (ignored optional) html alt text. % #5 is (ignored optional) extension. % #6 is just the usual extra ignored arg for parsing this stuff. \newif\ifimagevmode \def\imagexxx#1,#2,#3,#4,#5,#6\finish{\begingroup \catcode`\^^M = 5 % in case we're inside an example \normalturnoffactive % allow _ et al. in names % If the image is by itself, center it. \ifvmode \imagevmodetrue \nobreak\bigskip % Usually we'll have text after the image which will insert % \parskip glue, so insert it here too to equalize the space % above and below. \nobreak\vskip\parskip \nobreak \line\bgroup \fi % % Output the image. \ifpdf \dopdfimage{#1}{#2}{#3}% \else % \epsfbox itself resets \epsf?size at each figure. \setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \epsfxsize=#2\relax \fi \setbox0 = \hbox{\ignorespaces #3}\ifdim\wd0 > 0pt \epsfysize=#3\relax \fi \epsfbox{#1.eps}% \fi % \ifimagevmode \egroup \bigbreak \fi % space after the image \endgroup} % @float FLOATTYPE,LABEL,LOC ... @end float for displayed figures, tables, % etc. We don't actually implement floating yet, we always include the % float "here". But it seemed the best name for the future. 
% \envparseargdef\float{\eatcommaspace\eatcommaspace\dofloat#1, , ,\finish} % There may be a space before second and/or third parameter; delete it. \def\eatcommaspace#1, {#1,} % #1 is the optional FLOATTYPE, the text label for this float, typically % "Figure", "Table", "Example", etc. Can't contain commas. If omitted, % this float will not be numbered and cannot be referred to. % % #2 is the optional xref label. Also must be present for the float to % be referable. % % #3 is the optional positioning argument; for now, it is ignored. It % will somehow specify the positions allowed to float to (here, top, bottom). % % We keep a separate counter for each FLOATTYPE, which we reset at each % chapter-level command. \let\resetallfloatnos=\empty % \def\dofloat#1,#2,#3,#4\finish{% \let\thiscaption=\empty \let\thisshortcaption=\empty % % don't lose footnotes inside @float. % % BEWARE: when the floats start float, we have to issue warning whenever an % insert appears inside a float which could possibly float. --kasal, 26may04 % \startsavinginserts % % We can't be used inside a paragraph. \par % \vtop\bgroup \def\floattype{#1}% \def\floatlabel{#2}% \def\floatloc{#3}% we do nothing with this yet. % \ifx\floattype\empty \let\safefloattype=\empty \else {% % the floattype might have accents or other special characters, % but we need to use it in a control sequence name. \indexnofonts \turnoffactive \xdef\safefloattype{\floattype}% }% \fi % % If label is given but no type, we handle that as the empty type. \ifx\floatlabel\empty \else % We want each FLOATTYPE to be numbered separately (Figure 1, % Table 1, Figure 2, ...). (And if no label, no number.) % \expandafter\getfloatno\csname\safefloattype floatno\endcsname \global\advance\floatno by 1 % {% % This magic value for \thissection is output by \setref as the % XREFLABEL-title value. \xrefX uses it to distinguish float % labels (which have a completely different output format) from % node and anchor labels. And \xrdef uses it to construct the % lists of floats. % \edef\thissection{\floatmagic=\safefloattype}% \setref{\floatlabel}{Yfloat}% }% \fi % % start with \parskip glue, I guess. \vskip\parskip % % Don't suppress indentation if a float happens to start a section. \restorefirstparagraphindent } % we have these possibilities: % @float Foo,lbl & @caption{Cap}: Foo 1.1: Cap % @float Foo,lbl & no caption: Foo 1.1 % @float Foo & @caption{Cap}: Foo: Cap % @float Foo & no caption: Foo % @float ,lbl & Caption{Cap}: 1.1: Cap % @float ,lbl & no caption: 1.1 % @float & @caption{Cap}: Cap % @float & no caption: % \def\Efloat{% \let\floatident = \empty % % In all cases, if we have a float type, it comes first. \ifx\floattype\empty \else \def\floatident{\floattype}\fi % % If we have an xref label, the number comes next. \ifx\floatlabel\empty \else \ifx\floattype\empty \else % if also had float type, need tie first. \appendtomacro\floatident{\tie}% \fi % the number. \appendtomacro\floatident{\chaplevelprefix\the\floatno}% \fi % % Start the printed caption with what we've constructed in % \floatident, but keep it separate; we need \floatident again. \let\captionline = \floatident % \ifx\thiscaption\empty \else \ifx\floatident\empty \else \appendtomacro\captionline{: }% had ident, so need a colon between \fi % % caption text. \appendtomacro\captionline{\scanexp\thiscaption}% \fi % % If we have anything to print, print it, with space before. % Eventually this needs to become an \insert. \ifx\captionline\empty \else \vskip.5\parskip \captionline % % Space below caption. 
\vskip\parskip \fi % % If have an xref label, write the list of floats info. Do this % after the caption, to avoid chance of it being a breakpoint. \ifx\floatlabel\empty \else % Write the text that goes in the lof to the aux file as % \floatlabel-lof. Besides \floatident, we include the short % caption if specified, else the full caption if specified, else nothing. {% \atdummies % % since we read the caption text in the macro world, where ^^M % is turned into a normal character, we have to scan it back, so % we don't write the literal three characters "^^M" into the aux file. \scanexp{% \xdef\noexpand\gtemp{% \ifx\thisshortcaption\empty \thiscaption \else \thisshortcaption \fi }% }% \immediate\write\auxfile{@xrdef{\floatlabel-lof}{\floatident \ifx\gtemp\empty \else : \gtemp \fi}}% }% \fi \egroup % end of \vtop % % place the captured inserts % % BEWARE: when the floats start floating, we have to issue warning % whenever an insert appears inside a float which could possibly % float. --kasal, 26may04 % \checkinserts } % Append the tokens #2 to the definition of macro #1, not expanding either. % \def\appendtomacro#1#2{% \expandafter\def\expandafter#1\expandafter{#1#2}% } % @caption, @shortcaption % \def\caption{\docaption\thiscaption} \def\shortcaption{\docaption\thisshortcaption} \def\docaption{\checkenv\float \bgroup\scanargctxt\defcaption} \def\defcaption#1#2{\egroup \def#1{#2}} % The parameter is the control sequence identifying the counter we are % going to use. Create it if it doesn't exist and assign it to \floatno. \def\getfloatno#1{% \ifx#1\relax % Haven't seen this figure type before. \csname newcount\endcsname #1% % % Remember to reset this floatno at the next chap. \expandafter\gdef\expandafter\resetallfloatnos \expandafter{\resetallfloatnos #1=0 }% \fi \let\floatno#1% } % \setref calls this to get the XREFLABEL-snt value. We want an @xref % to the FLOATLABEL to expand to "Figure 3.1". We call \setref when we % first read the @float command. % \def\Yfloat{\floattype@tie \chaplevelprefix\the\floatno}% % Magic string used for the XREFLABEL-title value, so \xrefX can % distinguish floats from other xref types. \def\floatmagic{!!float!!} % #1 is the control sequence we are passed; we expand into a conditional % which is true if #1 represents a float ref. That is, the magic % \thissection value which we \setref above. % \def\iffloat#1{\expandafter\doiffloat#1==\finish} % % #1 is (maybe) the \floatmagic string. If so, #2 will be the % (safe) float type for this float. We set \iffloattype to #2. % \def\doiffloat#1=#2=#3\finish{% \def\temp{#1}% \def\iffloattype{#2}% \ifx\temp\floatmagic } % @listoffloats FLOATTYPE - print a list of floats like a table of contents. % \parseargdef\listoffloats{% \def\floattype{#1}% floattype {% % the floattype might have accents or other special characters, % but we need to use it in a control sequence name. \indexnofonts \turnoffactive \xdef\safefloattype{\floattype}% }% % % \xrdef saves the floats as a \do-list in \floatlistSAFEFLOATTYPE. \expandafter\ifx\csname floatlist\safefloattype\endcsname \relax \ifhavexrefs % if the user said @listoffloats foo but never @float foo. \message{\linenumber No `\safefloattype' floats to list.}% \fi \else \begingroup \leftskip=\tocindent % indent these entries like a toc \let\do=\listoffloatsdo \csname floatlist\safefloattype\endcsname \endgroup \fi } % This is called on each entry in a list of floats. We're passed the % xref label, in the form LABEL-title, which is how we save it in the % aux file. 
We strip off the -title and look up \XRLABEL-lof, which % has the text we're supposed to typeset here. % % Figures without xref labels will not be included in the list (since % they won't appear in the aux file). % \def\listoffloatsdo#1{\listoffloatsdoentry#1\finish} \def\listoffloatsdoentry#1-title\finish{{% % Can't fully expand XR#1-lof because it can contain anything. Just % pass the control sequence. On the other hand, XR#1-pg is just the % page number, and we want to fully expand that so we can get a link % in pdf output. \toksA = \expandafter{\csname XR#1-lof\endcsname}% % % use the same \entry macro we use to generate the TOC and index. \edef\writeentry{\noexpand\entry{\the\toksA}{\csname XR#1-pg\endcsname}}% \writeentry }} \message{localization,} % and i18n. % @documentlanguage is usually given very early, just after % @setfilename. If done too late, it may not override everything % properly. Single argument is the language abbreviation. % It would be nice if we could set up a hyphenation file here. % \parseargdef\documentlanguage{% \tex % read txi-??.tex file in plain TeX. % Read the file if it exists. \openin 1 txi-#1.tex \ifeof 1 \errhelp = \nolanghelp \errmessage{Cannot read language file txi-#1.tex}% \else \input txi-#1.tex \fi \closein 1 \endgroup } \newhelp\nolanghelp{The given language definition file cannot be found or is empty. Maybe you need to install it? In the current directory should work if nowhere else does.} % @documentencoding should change something in TeX eventually, most % likely, but for now just recognize it. \let\documentencoding = \comment % Page size parameters. % \newdimen\defaultparindent \defaultparindent = 15pt \chapheadingskip = 15pt plus 4pt minus 2pt \secheadingskip = 12pt plus 3pt minus 2pt \subsecheadingskip = 9pt plus 2pt minus 2pt % Prevent underfull vbox error messages. \vbadness = 10000 % Don't be so finicky about underfull hboxes, either. \hbadness = 2000 % Following George Bush, just get rid of widows and orphans. \widowpenalty=10000 \clubpenalty=10000 % Use TeX 3.0's \emergencystretch to help line breaking, but if we're % using an old version of TeX, don't do anything. We want the amount of % stretch added to depend on the line length, hence the dependence on % \hsize. We call this whenever the paper size is set. % \def\setemergencystretch{% \ifx\emergencystretch\thisisundefined % Allow us to assign to \emergencystretch anyway. \def\emergencystretch{\dimen0}% \else \emergencystretch = .15\hsize \fi } % Parameters in order: 1) textheight; 2) textwidth; % 3) voffset; 4) hoffset; 5) binding offset; 6) topskip; % 7) physical page height; 8) physical page width. % % We also call \setleading{\textleading}, so the caller should define % \textleading. The caller should also set \parskip. % \def\internalpagesizes#1#2#3#4#5#6#7#8{% \voffset = #3\relax \topskip = #6\relax \splittopskip = \topskip % \vsize = #1\relax \advance\vsize by \topskip \outervsize = \vsize \advance\outervsize by 2\topandbottommargin \pageheight = \vsize % \hsize = #2\relax \outerhsize = \hsize \advance\outerhsize by 0.5in \pagewidth = \hsize % \normaloffset = #4\relax \bindingoffset = #5\relax % \ifpdf \pdfpageheight #7\relax \pdfpagewidth #8\relax \fi % \setleading{\textleading} % \parindent = \defaultparindent \setemergencystretch } % @letterpaper (the default). \def\letterpaper{{\globaldefs = 1 \parskip = 3pt plus 2pt minus 1pt \textleading = 13.2pt % % If page is nothing but text, make it come out even. 
\internalpagesizes{46\baselineskip}{6in}% {\voffset}{.25in}% {\bindingoffset}{36pt}% {11in}{8.5in}% }} % Use @smallbook to reset parameters for 7x9.25 trim size. \def\smallbook{{\globaldefs = 1 \parskip = 2pt plus 1pt \textleading = 12pt % \internalpagesizes{7.5in}{5in}% {\voffset}{.25in}% {\bindingoffset}{16pt}% {9.25in}{7in}% % \lispnarrowing = 0.3in \tolerance = 700 \hfuzz = 1pt \contentsrightmargin = 0pt \defbodyindent = .5cm }} % Use @smallerbook to reset parameters for 6x9 trim size. % (Just testing, parameters still in flux.) \def\smallerbook{{\globaldefs = 1 \parskip = 1.5pt plus 1pt \textleading = 12pt % \internalpagesizes{7.4in}{4.8in}% {-.2in}{-.4in}% {0pt}{14pt}% {9in}{6in}% % \lispnarrowing = 0.25in \tolerance = 700 \hfuzz = 1pt \contentsrightmargin = 0pt \defbodyindent = .4cm }} % Use @afourpaper to print on European A4 paper. \def\afourpaper{{\globaldefs = 1 \parskip = 3pt plus 2pt minus 1pt \textleading = 13.2pt % % Double-side printing via postscript on Laserjet 4050 % prints double-sided nicely when \bindingoffset=10mm and \hoffset=-6mm. % To change the settings for a different printer or situation, adjust % \normaloffset until the front-side and back-side texts align. Then % do the same for \bindingoffset. You can set these for testing in % your texinfo source file like this: % @tex % \global\normaloffset = -6mm % \global\bindingoffset = 10mm % @end tex \internalpagesizes{51\baselineskip}{160mm} {\voffset}{\hoffset}% {\bindingoffset}{44pt}% {297mm}{210mm}% % \tolerance = 700 \hfuzz = 1pt \contentsrightmargin = 0pt \defbodyindent = 5mm }} % Use @afivepaper to print on European A5 paper. % From romildo@urano.iceb.ufop.br, 2 July 2000. % He also recommends making @example and @lisp be small. \def\afivepaper{{\globaldefs = 1 \parskip = 2pt plus 1pt minus 0.1pt \textleading = 12.5pt % \internalpagesizes{160mm}{120mm}% {\voffset}{\hoffset}% {\bindingoffset}{8pt}% {210mm}{148mm}% % \lispnarrowing = 0.2in \tolerance = 800 \hfuzz = 1.2pt \contentsrightmargin = 0pt \defbodyindent = 2mm \tableindent = 12mm }} % A specific text layout, 24x15cm overall, intended for A4 paper. \def\afourlatex{{\globaldefs = 1 \afourpaper \internalpagesizes{237mm}{150mm}% {\voffset}{4.6mm}% {\bindingoffset}{7mm}% {297mm}{210mm}% % % Must explicitly reset to 0 because we call \afourpaper. \globaldefs = 0 }} % Use @afourwide to print on A4 paper in landscape format. \def\afourwide{{\globaldefs = 1 \afourpaper \internalpagesizes{241mm}{165mm}% {\voffset}{-2.95mm}% {\bindingoffset}{7mm}% {297mm}{210mm}% \globaldefs = 0 }} % @pagesizes TEXTHEIGHT[,TEXTWIDTH] % Perhaps we should allow setting the margins, \topskip, \parskip, % and/or leading, also. Or perhaps we should compute them somehow. % \parseargdef\pagesizes{\pagesizesyyy #1,,\finish} \def\pagesizesyyy#1,#2,#3\finish{{% \setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \hsize=#2\relax \fi \globaldefs = 1 % \parskip = 3pt plus 2pt minus 1pt \setleading{\textleading}% % \dimen0 = #1 \advance\dimen0 by \voffset % \dimen2 = \hsize \advance\dimen2 by \normaloffset % \internalpagesizes{#1}{\hsize}% {\voffset}{\normaloffset}% {\bindingoffset}{44pt}% {\dimen0}{\dimen2}% }} % Set default to letter. % \letterpaper \message{and turning on texinfo input format.} % Define macros to output various characters with catcode for normal text. 
\catcode`\"=\other \catcode`\~=\other \catcode`\^=\other \catcode`\_=\other \catcode`\|=\other \catcode`\<=\other \catcode`\>=\other \catcode`\+=\other \catcode`\$=\other \def\normaldoublequote{"} \def\normaltilde{~} \def\normalcaret{^} \def\normalunderscore{_} \def\normalverticalbar{|} \def\normalless{<} \def\normalgreater{>} \def\normalplus{+} \def\normaldollar{$}%$ font-lock fix % This macro is used to make a character print one way in \tt % (where it can probably be output as-is), and another way in other fonts, % where something hairier probably needs to be done. % % #1 is what to print if we are indeed using \tt; #2 is what to print % otherwise. Since all the Computer Modern typewriter fonts have zero % interword stretch (and shrink), and it is reasonable to expect all % typewriter fonts to have this, we can check that font parameter. % \def\ifusingtt#1#2{\ifdim \fontdimen3\font=0pt #1\else #2\fi} % Same as above, but check for italic font. Actually this also catches % non-italic slanted fonts since it is impossible to distinguish them from % italic fonts. But since this is only used by $ and it uses \sl anyway % this is not a problem. \def\ifusingit#1#2{\ifdim \fontdimen1\font>0pt #1\else #2\fi} % Turn off all special characters except @ % (and those which the user can use as if they were ordinary). % Most of these we simply print from the \tt font, but for some, we can % use math or other variants that look better in normal text. \catcode`\"=\active \def\activedoublequote{{\tt\char34}} \let"=\activedoublequote \catcode`\~=\active \def~{{\tt\char126}} \chardef\hat=`\^ \catcode`\^=\active \def^{{\tt \hat}} \catcode`\_=\active \def_{\ifusingtt\normalunderscore\_} \let\realunder=_ % Subroutine for the previous macro. \def\_{\leavevmode \kern.07em \vbox{\hrule width.3em height.1ex}\kern .07em } \catcode`\|=\active \def|{{\tt\char124}} \chardef \less=`\< \catcode`\<=\active \def<{{\tt \less}} \chardef \gtr=`\> \catcode`\>=\active \def>{{\tt \gtr}} \catcode`\+=\active \def+{{\tt \char 43}} \catcode`\$=\active \def${\ifusingit{{\sl\$}}\normaldollar}%$ font-lock fix % If a .fmt file is being used, characters that might appear in a file % name cannot be active until we have parsed the command line. % So turn them off again, and have \everyjob (or @setfilename) turn them on. % \otherifyactive is called near the end of this file. \def\otherifyactive{\catcode`+=\other \catcode`\_=\other} % Used sometimes to turn off (effectively) the active characters even after % parsing them. \def\turnoffactive{% \normalturnoffactive \otherbackslash } \catcode`\@=0 % \backslashcurfont outputs one backslash character in current font, % as in \char`\\. \global\chardef\backslashcurfont=`\\ \global\let\rawbackslashxx=\backslashcurfont % let existing .??s files work % \realbackslash is an actual character `\' with catcode other, and % \doublebackslash is two of them (for the pdf outlines). {\catcode`\\=\other @gdef@realbackslash{\} @gdef@doublebackslash{\\}} % In texinfo, backslash is an active character; it prints the backslash % in fixed width font. \catcode`\\=\active @def@normalbackslash{{@tt@backslashcurfont}} % On startup, @fixbackslash assigns: % @let \ = @normalbackslash % \rawbackslash defines an active \ to do \backslashcurfont. % \otherbackslash defines an active \ to be a literal `\' character with % catcode other. @gdef@rawbackslash{@let\=@backslashcurfont} @gdef@otherbackslash{@let\=@realbackslash} % Same as @turnoffactive except outputs \ as {\tt\char`\\} instead of % the literal character `\'. 
% @def@normalturnoffactive{% @let\=@normalbackslash @let"=@normaldoublequote @let~=@normaltilde @let^=@normalcaret @let_=@normalunderscore @let|=@normalverticalbar @let<=@normalless @let>=@normalgreater @let+=@normalplus @let$=@normaldollar %$ font-lock fix @unsepspaces } % Make _ and + \other characters, temporarily. % This is canceled by @fixbackslash. @otherifyactive % If a .fmt file is being used, we don't want the `\input texinfo' to show up. % That is what \eatinput is for; after that, the `\' should revert to printing % a backslash. % @gdef@eatinput input texinfo{@fixbackslash} @global@let\ = @eatinput % On the other hand, perhaps the file did not have a `\input texinfo'. Then % the first `\' in the file would cause an error. This macro tries to fix % that, assuming it is called before the first `\' could plausibly occur. % Also turn back on active characters that might appear in the input % file name, in case not using a pre-dumped format. % @gdef@fixbackslash{% @ifx\@eatinput @let\ = @normalbackslash @fi @catcode`+=@active @catcode`@_=@active } % Say @foo, not \foo, in error messages. @escapechar = `@@ % These look ok in all fonts, so just make them not special. @catcode`@& = @other @catcode`@# = @other @catcode`@% = @other @c Local variables: @c eval: (add-hook 'write-file-hooks 'time-stamp) @c page-delimiter: "^\\\\message" @c time-stamp-start: "def\\\\texinfoversion{" @c time-stamp-format: "%:y-%02m-%02d.%02H" @c time-stamp-end: "}" @c End: @c vim:sw=2: @ignore arch-tag: e1b36e32-c96e-4135-a41a-0b2efa2ea115 @end ignore DrIFT-2.2.3/ac-macros/missing0000755000076400007640000002557710753606417012654 00000000000000#! /bin/sh # Common stub for a few missing GNU programs while installing. scriptversion=2006-05-10.23 # Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006 # Free Software Foundation, Inc. # Originally by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try \`$0 --help' for more information" exit 1 fi run=: sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' sed_minuso='s/.* -o \([^ ]*\).*/\1/p' # In the cases where this matters, `missing' is being run in the # srcdir already. if test -f configure.ac; then configure_ac=configure.ac else configure_ac=configure.in fi msg="missing on your system" case $1 in --run) # Try to run requested program, and just exit if it succeeds. run= shift "$@" && exit 0 # Exit code 63 means version mismatch. This often happens # when the user try to use an ancient version of a tool on # a file that requires a minimum version. 
In this case we # we should proceed has if the program had been absent, or # if --run hadn't been passed. if test $? = 63; then run=: msg="probably too old" fi ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an error status if there is no known handling for PROGRAM. Options: -h, --help display this help and exit -v, --version output version information and exit --run try to run the given command, and emulate it if it fails Supported PROGRAM values: aclocal touch file \`aclocal.m4' autoconf touch file \`configure' autoheader touch file \`config.h.in' autom4te touch the output file, or create a stub one automake touch all \`Makefile.in' files bison create \`y.tab.[ch]', if possible, from existing .[ch] flex create \`lex.yy.c', if possible, from existing .c help2man touch the output file lex create \`lex.yy.c', if possible, from existing .c makeinfo touch the output file tar try tar, gnutar, gtar, then tar without non-portable flags yacc create \`y.tab.[ch]', if possible, from existing .[ch] Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: Unknown \`$1' option" echo 1>&2 "Try \`$0 --help' for more information" exit 1 ;; esac # Now exit if we have it, but it failed. Also exit now if we # don't have it and --version was passed (most likely to detect # the program). case $1 in lex|yacc) # Not GNU programs, they don't have --version. ;; tar) if test -n "$run"; then echo 1>&2 "ERROR: \`tar' requires --run" exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then exit 1 fi ;; *) if test -z "$run" && ($1 --version) > /dev/null 2>&1; then # We have it, but it failed. exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then # Could not run --version or --help. This is probably someone # running `$TOOL --version' or `$TOOL --help' to check whether # $TOOL exists and not knowing $TOOL uses missing. exit 1 fi ;; esac # If it does not exist, or fails to run (possibly an outdated version), # try to emulate it. case $1 in aclocal*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." touch aclocal.m4 ;; autoconf) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." touch configure ;; autoheader) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acconfig.h' or \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` test -z "$files" && files="config.h" touch_files= for f in $files; do case $f in *:*) touch_files="$touch_files "`echo "$f" | sed -e 's/^[^:]*://' -e 's/:.*//'`;; *) touch_files="$touch_files $f.in";; esac done touch $touch_files ;; automake*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." find . 
-type f -name Makefile.am -print | sed 's/\.am$/.in/' | while read f; do touch "$f"; done ;; autom4te) echo 1>&2 "\ WARNING: \`$1' is needed, but is $msg. You might have modified some files without having the proper tools for further handling them. You can get \`$1' as part of \`Autoconf' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo "#! /bin/sh" echo "# Created by GNU Automake missing as a replacement of" echo "# $ $@" echo "exit 0" chmod +x $file exit 1 fi ;; bison|yacc) echo 1>&2 "\ WARNING: \`$1' $msg. You should only need it if you modified a \`.y' file. You may need the \`Bison' package in order for those modifications to take effect. You can get \`Bison' from any GNU archive site." rm -f y.tab.c y.tab.h if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.y) SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.c fi SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.h fi ;; esac fi if test ! -f y.tab.h; then echo >y.tab.h fi if test ! -f y.tab.c; then echo 'main() { return 0; }' >y.tab.c fi ;; lex|flex) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.l' file. You may need the \`Flex' package in order for those modifications to take effect. You can get \`Flex' from any GNU archive site." rm -f lex.yy.c if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.l) SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" lex.yy.c fi ;; esac fi if test ! -f lex.yy.c; then echo 'main() { return 0; }' >lex.yy.c fi ;; help2man) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a dependency of a manual page. You may need the \`Help2man' package in order for those modifications to take effect. You can get \`Help2man' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo ".ab help2man is required to generate this page" exit 1 fi ;; makeinfo) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.texi' or \`.texinfo' file, or any other file indirectly affecting the aspect of the manual. The spurious call might also be the consequence of using a buggy \`make' (AIX, DU, IRIX). You might want to install the \`Texinfo' package or the \`GNU make' package. Grab either from any GNU archive site." # The file to touch is that specified with -o ... file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -z "$file"; then # ... or it is the one specified with @setfilename ... infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` file=`sed -n ' /^@setfilename/{ s/.* \([^ ]*\) *$/\1/ p q }' $infile` # ... or it is derived from the source name (dir/f.texi becomes f.info) test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info fi # If the file does not exist, the user really needs makeinfo; # let's fail without touching anything. test -f $file || exit 1 touch $file ;; tar) shift # We have already tried tar in the generic part. # Look for gnutar/gtar before invocation to avoid ugly error # messages. 
if (gnutar --version > /dev/null 2>&1); then gnutar "$@" && exit 0 fi if (gtar --version > /dev/null 2>&1); then gtar "$@" && exit 0 fi firstarg="$1" if shift; then case $firstarg in *o*) firstarg=`echo "$firstarg" | sed s/o//` tar "$firstarg" "$@" && exit 0 ;; esac case $firstarg in *h*) firstarg=`echo "$firstarg" | sed s/h//` tar "$firstarg" "$@" && exit 0 ;; esac fi echo 1>&2 "\ WARNING: I can't seem to be able to run \`tar' with the given arguments. You may want to install GNU tar or Free paxutils, or check the command line arguments." exit 1 ;; *) echo 1>&2 "\ WARNING: \`$1' is needed, and is $msg. You might have modified some files without having the proper tools for further handling them. Check the \`README' file, it often tells you about the needed prerequisites for installing this package. You may also peek at any GNU archive site, in case some other package would contain this missing \`$1' program." exit 1 ;; esac exit 0 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-end: "$" # End: DrIFT-2.2.3/ac-macros/acincludepackage.m40000644000076400007640000000637310753606147014753 00000000000000dnl Borrowed from Merijn de Jonge: dnl AC_PACKAGE_REQUIRE dnl AC_PROGRAM_REQUIRE dnl AC_PACKAGE_REQUIRE1 dnl AC_PACKAGE_REQUIRE dnl Add --with- switch. If this switch was not specified try to locate dnl it by searching for one or more programs contained in that package. dnl Abort configuration when no program could be found. The variable PKG contains the dnl full path to the found program on return. dnl dnl Usage: dnl AC_PACKAGE_REQUIRE(package, programs, usage ) dnl dnl When --with- was specified, the variable will contain the dnl value of the argument of the switch. If the switch was not specified, dnl its location is determined automatically using the argument. dnl This is a list of programs to search for to find the location of the dnl package. If one such program is found, is set to dnl /../.. dnl dnl Example dnl AC_PACKAGE_REQUIRE(wish, wish8.0 wish8.1 wish, dnl [ --with-wish location of wish program]) dnl dnl This would set the variable WISH to the location of the first program dnl found or to the program as specified with the --with-wish switch. AC_DEFUN([AC_PACKAGE_REQUIRE], [AC_PACKAGE_REQUIRE1([$1],[$2],[$3], dnl Program found; evaluate [translit($1,-a-z,_A-Z)=`dirname \`dirname $translit($1,a-z-,A-Z_)\``]) ]) dnl AC_PROGRAM_REQUIRE dnl Add --with- switch. If this switch was not specified try to locate dnl it by searching for one or more programs contained in that package. dnl Abort configuration when no program could be found. The variable PKG contains the dnl full path to the found program on return. dnl dnl Usage: dnl AC_PACKAGE_REQUIRE(package, programs, usage ) dnl dnl When --with- was specified, the variable will contain the dnl value of the argument of the switch. If the switch was not specified, dnl its location is determined automatically using the argument. dnl This is a list of programs to search for to find the location of the dnl package. If one such program is found, is set to dnl dnl Example dnl AC_PROGRAM_REQUIRE(wish, wish8.0 wish8.1 wish, dnl [ --with-wish location of wish program]) dnl dnl This would set the variable WISH to the location of the first program dnl found or to the program as specified with the --with-wish switch. 
AC_DEFUN([AC_PROGRAM_REQUIRE], [AC_PACKAGE_REQUIRE1([$1],[$2],[$3],)]) dnl AC_PACKAGE_REQUIRE1 dnl Implementation for AC_PACKAGE_REQUIRE and AC_PROGRAM_REQUIRE. The dnl commands in the fourht argument are evaluated after a successful dnl search for a package program and is used to obtain the installation dnl directory from a full path to a program. For example to obtain dnl /usr/local from /usr/local/bin/wish AC_DEFUN([AC_PACKAGE_REQUIRE1], [ dnl Add configuration switch AC_ARG_WITH($1, [$3], dnl switch was specified translit($1,a-z-,A-Z_)=${withval}, dnl If switch not specified, try to find the program automatically [ AC_PATH_PROGS(translit($1,a-z-,A-Z_),$2, [ dnl Not found; abort configuration AC_ERROR(Required \"$1\" program not found.) ]) dnl Program found; evaluate $4 ]) ]) DrIFT-2.2.3/ac-macros/install-sh0000755000076400007640000003160010753606417013241 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2006-10-14.15 # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit="${DOITPROG-}" if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. mvprog="${MVPROG-mv}" cpprog="${CPPROG-cp}" chmodprog="${CHMODPROG-chmod}" chownprog="${CHOWNPROG-chown}" chgrpprog="${CHGRPPROG-chgrp}" stripprog="${STRIPPROG-strip}" rmprog="${RMPROG-rm}" mkdirprog="${MKDIRPROG-mkdir}" posix_glob= posix_mkdir= # Desired mode of installed file. mode=0755 chmodcmd=$chmodprog chowncmd= chgrpcmd= stripcmd= rmcmd="$rmprog -f" mvcmd="$mvprog" src= dst= dir_arg= dstarg= no_target_directory= usage="Usage: $0 [OPTION]... 
[-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: -c (ignored) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. --help display this help and exit. --version display version info and exit. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) shift continue;; -d) dir_arg=true shift continue;; -g) chgrpcmd="$chgrpprog $2" shift shift continue;; --help) echo "$usage"; exit $?;; -m) mode=$2 shift shift case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac continue;; -o) chowncmd="$chownprog $2" shift shift continue;; -s) stripcmd=$stripprog shift continue;; -t) dstarg=$2 shift shift continue;; -T) no_target_directory=true shift continue;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac done if test $# -ne 0 && test -z "$dir_arg$dstarg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dstarg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dstarg" shift # fnord fi shift # arg dstarg=$arg done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then trap '(exit $?); exit' 1 2 13 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names starting with `-'. case $src in -*) src=./$src ;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dstarg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dstarg # Protect names starting with `-'. case $dst in -*) dst=./$dst ;; esac # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dstarg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. 
dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix=/ ;; -*) prefix=./ ;; *) prefix= ;; esac case $posix_glob in '') if (set -f) 2>/dev/null; then posix_glob=true else posix_glob=false fi ;; esac oIFS=$IFS IFS=/ $posix_glob && set -f set fnord $dstdir shift $posix_glob && set +f IFS=$oIFS prefixes= for d do test -z "$d" && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. 
(umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # Now rename the file to the real destination. { $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null \ || { # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { if test -f "$dst"; then $doit $rmcmd -f "$dst" 2>/dev/null \ || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null \ && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }; }\ || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } else : fi } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } } || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-end: "$" # End: DrIFT-2.2.3/Makefile.am0000644000076400007640000000311710753606455011430 00000000000000AUTOMAKE_OPTIONS = no-dependencies foreign ACLOCAL_AMFLAGS=-I ac-macros info_TEXINFOS = docs/drift.texi docs/drift.html: docs/drift.texi texi2html -monolithic $< -o $@ SUBDIRS = src example EXTRA_DIST = configure.ac \ Changelog README.old drift-ghc.in code \ code/FunctorM.hs code/GhcBinary.hs code/README.txt \ ac-macros/acincludepackage.m4 LICENSE docs/drift.html bin_SCRIPTS = drift-ghc PUBLISH_DIR = /home/john/public_html/computer/haskell/DrIFT PUBLISH_FILES = docs/drift.html docs/drift.ps drift-list.txt BINDISTNAME=@PACKAGE_NAME@-@VERSION@-`uname -s`-`uname -m` bin-dist: (cd ./src; $(MAKE) bin-dist) mv ./src/$(BINDISTNAME).tar.gz . 
drift-list.txt: src/DrIFT src/DrIFT -l > drift-list.txt publish: drift-list.txt docs bin-dist dist # rpm rm -f -- $(PUBLISH_DIR)/drop/$(PACKAGE_NAME)-*-*-*.tar.gz # rm -f -- $(PUBLISH_DIR)/drop/$(PACKAGE)-*.rpm rm -f -- "$(PUBLISH_DIR)/drop/$(PACKAGE_NAME)-$(VERSION).tar.gz" cp -- "$(BINDISTNAME).tar.gz" "$(PUBLISH_DIR)/drop" cp -- "$(PACKAGE_NAME)-$(VERSION).tar.gz" "$(PUBLISH_DIR)/drop" cp -- $(PUBLISH_FILES) $(PUBLISH_DIR) # cp -- "$(HOME)/redhat/RPMS/i386/$(PACKAGE)-$(VERSION)-1.i386.rpm" "$(PUBLISH_DIR)/drop" # cp -- "$(HOME)/redhat/SRPMS/$(PACKAGE)-$(VERSION)-1.src.rpm" "$(PUBLISH_DIR)/drop" cp -- Changelog $(PUBLISH_DIR) make -C $(PUBLISH_DIR) || true rpm: depend $(PACKAGE_NAME).spec dist cp $(PACKAGE_NAME)-$(VERSION).tar.gz $(HOME)/var/rpm/SOURCES/ rpmbuild -ba $(PACKAGE_NAME).spec depend: $(MAKE) -C src depend docs: docs/drift.html docs/drift.ps .PHONY: docs depend rpm publish bin-dist DrIFT-2.2.3/Changelog0000644000076400007640000001233010753606147011201 00000000000000DrIFT-2.2.2: * redid build model, collect deriving rules automatically. DrIFT-2.2.1: Mon Aug 14 19:22:26 PDT 2006 John Meacham * update autotools to make autoreconf work * update README to reflect there no longer being a 'reconf' script * get rid of recursive make in docs/ subdirectory Thu Jan 25 21:04:02 PST 2007 John Meacham * add support for Data.Binary, move old Binary to BitsBinary DrIFT-2.2.0: Mon Apr 10 00:39:24 PDT 2006 John Meacham * add note about reconf script * make the parser handle newtype deriving without bailing out * fix bug in Ord deriving rule Wed Jan 18 03:59:21 PST 2006 David Roundy * fix typo in src/Makefile.am Sun Nov 27 02:59:14 PST 2005 Malcolm.Wallace@cs.york.ac.uk * Change rule for Haskell2XmlNew into XmlContent, add Parse rule. The Haskell2XmlNew class was a temporary staging post on the way to developing a new XmlContent class, available in HaXml-1.14 onwards. This patch updates the name, and the rule, to match the latest developments. It also adds support for the textual (non-XML) Parse class, which is an experimental replacement for the Read class, using monadic parser combinators. This is also currently defined in HaXml-1.14. * docs should refer to StandardRules.hs, not .lhs * fix docs for Hugs usage * keep docs of --list option up-to-date Tue Nov 15 18:09:40 PST 2005 Einar Karttunen * Hook Arbitrary into DrIFT proper * Add UserRuleArbitrary Thu Nov 3 02:52:38 PST 2005 John Meacham * fix data derving for new versions of ghc DrIFT-2.1.2: * fix rpm building, autoconf and some general make issues * fix warnings in generated binary instances * fix autoconf warnings Mon Sep 12 08:11:10 PDT 2005 Malcolm.Wallace@cs.york.ac.uk * support for Haskell2XmlNew The HaXml library Haskell2Xml has been extended to make secondary parsing of the generic XML content tree into a typed value more robust. An API change means that a new version of the class (unimaginatively called Haskell2XmlNew) exists, and this patch adds the ability to generate parsing code for the new class. * doc updates Fix some very out-of-date statements in the manual. 
Tue May 31 16:17:12 PDT 2005 Samuel Bronson
  * avoid problems with hmake by invoking ghc -M directly
  * Default to GHC to avoid NHC's tiny heap

DrIFT-2.1.1:

Tue Apr 19 21:09:13 PDT 2005 John Meacham
  * Be sure to flush handles pointing to files before quitting
  * fix off-by-one line number error
  * Remove reference to genconf
  * Updated GenUtil.hs to fix nhc build

DrIFT-2.1.0:
  * added 'get' utility which creates foo_g for each label which returns
    its value in a failing monad rather than raising an error.
  * added 'from' utility which creates fromFoo for each constructor
  * no longer an error for DERIVEPATH to not be set
  * added -i to ignore directives in file. useful with -g and -r for testing
  * added 'Query' which creates from, get, has, and is functions, but in a
    class with an instance for the data type. useful for extensible data types
  * added deriving rule for 'Monoid'
  * added code subdirectory with support code

DrIFT-2.0.4:
  * added rule for GhcBinary
  * now allows record syntax with newtype

DrIFT-2.0.3:
  * 'update' functions now pass through values without the specified field
  * drift-ghc script added to be used with ghc by passing -pgmF drift-ghc -F
  * documentation updated.

DrIFT-2.0.2:
  * deriving 'update' works with newer versions of ghc
  * fixed bug in Show and Read instances

DrIFT-2.0.1:
  * fixed many warnings in generated code
  * support for strict fields in constructors
  * support for infix constructors

DrIFT-2.0rc4:
  * added help messages and categorization to rules
  * small isLiterate fix (TODO use nhcs deliterizer)
  * prints debug info in comment in chaseImports
  * code cleanups
  * integrated some changes from Strafunski 1.7
  * autoconf/automake improvements

DrIFT-2.0rc3:
  * fixed version strings in generated files
  * fix dependency generation
  * fixed example test cases

DrIFT-2.0rc2:
  * autoconf/automake added
  * texinfo is installed properly now
  * build system cleanups
  * works with both nhc and ghc again

DrIFT-2.0rc1:
  * new texinfo documentation, based on Noel's original manual.
  * new option '-r' to only emit results and not original source
  * new option '-g' to add new global rules to apply.
  * cleaned up old non haskell98-compliant hacks

jDrIFT-1:
  * proper GetOpt command line handling, allows you to specify an output
    file (-o) as well as list all possible derivations this version was
    compiled with (-l)
  * proper LINE pragmas in generated code
  * smarter error handling in some cases, parser smarter sometimes.
  * can deal with/ignore as appropriate some ghc extensions now
  * rules to derive Typeable used by the Dynamic library.
  * rules to derive Observable from the HOOD object debugger
  * rules to derive Term, a generic term representation for generic
    computation, can be used in a similar fashion to the Strafunski one, but
    has a number of advantages, fully lazy constant time implode/explode
    being the main one. (the external library is compatible with their work.)
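The 2.1.0 and 2.0.3 entries above describe the 'get', 'from' and 'update' helpers
(foo_g for each labelled field, fromFoo for each constructor) and the drift-ghc
preprocessor hook. As a rough sketch only, using made-up module, type and field
names, and assuming the {-! derive : ... !-} directive syntax documented in the
DrIFT manual, a source file using those rules could look like this:

{-# OPTIONS_GHC -F -pgmF drift-ghc #-}  -- assumes GHC's OPTIONS_GHC pragma as one way to
                                        -- pass the -F -pgmF drift-ghc flags named in the
                                        -- 2.0.3 entry; they can also go on the command line
module Example where                    -- hypothetical module, not part of this archive

data Colour = Red | Green | Blue
    {-! derive : Eq, Ord, Show !-}      -- standard-class deriving rules

data Point = Point { px :: Int, py :: Int }
    {-! derive : Show, get, update !-}  -- per the 2.1.0 entry, 'get' generates px_g and
                                        -- py_g, which return the field's value in a
                                        -- failing monad rather than raising an error;
                                        -- 'update' generates field-update helpers.

DrIFT can also be run by hand; the -o, -r, -g, -i and -l options mentioned in the
entries above choose the output file, emit only the generated code, add global
rules, ignore in-file directives, and list the available rules, respectively.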
DrIFT-2.2.3/LICENSE0000644000076400007640000000205410753606147010376 00000000000000Copyright (c) 2002-2007 DrIFT contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. DrIFT-2.2.3/configure0000755000076400007640000027223510753606416011311 00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.61 for DrIFT 2.2.3. # # Report bugs to . # # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, # 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) as_nl=' ' IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! 
-f "$as_myself"; then echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. for as_var in \ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var fi done # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH if test "x$CONFIG_SHELL" = x; then if (eval ":") 2>/dev/null; then as_have_required=yes else as_have_required=no fi if test $as_have_required = yes && (eval ": (as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=\$LINENO as_lineno_2=\$LINENO test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } ") 2> /dev/null; then : else as_candidate_shells= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. case $as_dir in /*) for as_base in sh bash ksh sh5; do as_candidate_shells="$as_candidate_shells $as_dir/$as_base" done;; esac done IFS=$as_save_IFS for as_shell in $as_candidate_shells $SHELL; do # Try only shells that exist, to save several forks. if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { ("$as_shell") 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : _ASEOF }; then CONFIG_SHELL=$as_shell as_have_required=yes if { "$as_shell" 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : (as_func_return () { (exit $1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = "$1" ); then : else exitcode=1 echo positional parameters were not saved. fi test $exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } _ASEOF }; then break fi fi done if test "x$CONFIG_SHELL" != x; then for as_var in BASH_ENV ENV do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done export CONFIG_SHELL exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} fi if test $as_have_required = no; then echo This script requires a shell more modern than all the echo shells that I found on your system. Please install a echo modern shell, or manually run the script under such a echo shell if you do have one. { (exit 1); exit 1; } fi fi fi (eval "as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0") || { echo No shell found that supports shell functions. echo Please tell autoconf@gnu.org about your system, echo including any error possibly output before this echo message } as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . 
"./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir fi echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='DrIFT' PACKAGE_TARNAME='DrIFT' PACKAGE_VERSION='2.2.3' PACKAGE_STRING='DrIFT 2.2.3' PACKAGE_BUGREPORT='john@repetae.net' ac_unique_file="src/DrIFT.hs" ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datarootdir datadir sysconfdir sharedstatedir localstatedir includedir oldincludedir docdir infodir htmldir dvidir pdfdir psdir libdir localedir mandir DEFS ECHO_C ECHO_N ECHO_T LIBS build_alias host_alias target_alias INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA am__isrc CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar HC SH HCFLAGS LIBOBJS LTLIBOBJS' ac_subst_files='' ac_precious_vars='build_alias host_alias target_alias' # Initialize some variables set by options. ac_init_help= ac_init_version=false # The variables have the same names as the options, with # dashes changed to underlines. 
cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` eval enable_$ac_feature=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` eval enable_$ac_feature=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package | sed 's/[-.]/_/g'` eval with_$ac_package=\$ac_optarg ;; -without-* | --without-*) ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package | sed 's/[-.]/_/g'` eval with_$ac_package=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) { echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` { echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi # Be sure to have absolute directory names. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; } done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. 
# FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || { echo "$as_me: error: Working directory cannot be determined" >&2 { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || { echo "$as_me: error: pwd does not report name of working directory" >&2 { (exit 1); exit 1; }; } # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$0" || $as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$0" : 'X\(//\)[^/]' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X"$0" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2 { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures DrIFT 2.2.3 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/DrIFT] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of DrIFT 2.2.3:";; esac cat <<\_ACEOF Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-hc= Specify location of ghc. --with-hcflags=HCFLAGS specify flags for Haskell compiler Report bugs to . _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. 
ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF DrIFT configure 2.2.3 generated by GNU Autoconf 2.61 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by DrIFT $as_me 2.2.3, which was generated by GNU Autoconf 2.61. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. 
else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac done done $as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } $as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo cat <<\_ASBOX ## ---------------- ## ## Cache variables. ## ## ---------------- ## _ASBOX echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo cat <<\_ASBOX ## ----------------- ## ## Output variables. ## ## ----------------- ## _ASBOX echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then cat <<\_ASBOX ## ------------------- ## ## File substitutions. ## ## ------------------- ## _ASBOX echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then cat <<\_ASBOX ## ----------- ## ## confdefs.h. ## ## ----------- ## _ASBOX echo cat confdefs.h echo fi test "$ac_signal" != 0 && echo "$as_me: caught signal $ac_signal" echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h # Predefined preprocessor variables. 
cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer explicitly selected file to automatically selected ones. if test -n "$CONFIG_SITE"; then set x "$CONFIG_SITE" elif test "x$prefix" != xNONE; then set x "$prefix/share/config.site" "$prefix/etc/config.site" else set x "$ac_default_prefix/share/config.site" \ "$ac_default_prefix/etc/config.site" fi shift for ac_site_file do if test -r "$ac_site_file"; then { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. if test -f "$cache_file"; then { echo "$as_me:$LINENO: loading cache $cache_file" >&5 echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { echo "$as_me:$LINENO: creating cache $cache_file" >&5 echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 echo "$as_me: former value: $ac_old_val" >&2;} { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 echo "$as_me: current value: $ac_new_val" >&2;} ac_cache_corrupted=: fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. 
*) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 echo "$as_me: error: changes in the environment can compromise the build" >&2;} { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in ac-macros "$srcdir"/ac-macros; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in ac-macros \"$srcdir\"/ac-macros" >&5 echo "$as_me: error: cannot find install-sh or install.sh in ac-macros \"$srcdir\"/ac-macros" >&2;} { (exit 1); exit 1; }; } fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. am__api_version='1.10' # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. { echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. 
for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi done done ;; esac done IFS=$as_save_IFS fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { echo "$as_me:$LINENO: result: $INSTALL" >&5 echo "${ECHO_T}$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { echo "$as_me:$LINENO: checking whether build environment is sane" >&5 echo $ECHO_N "checking whether build environment is sane... $ECHO_C" >&6; } # Just in case sleep 1 echo timestamp > conftest.file # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt $srcdir/configure conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t $srcdir/configure conftest.file` fi rm -f conftest.file if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". { { echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&5 echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&2;} { (exit 1); exit 1; }; } fi test "$2" = conftest.file ) then # Ok. : else { { echo "$as_me:$LINENO: error: newly created file is older than distributed files! Check your system clock" >&5 echo "$as_me: error: newly created file is older than distributed files! Check your system clock" >&2;} { (exit 1); exit 1; }; } fi { echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6; } test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. echo might interpret backslashes. # By default was `s,x,x', remove it if useless. 
cat <<\_ACEOF >conftest.sed s/[\\$]/&&/g;s/;s,x,x,$// _ACEOF program_transform_name=`echo $program_transform_name | sed -f conftest.sed` rm -f conftest.sed # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` test x"${MISSING+set}" = xset || MISSING="\${SHELL} $am_aux_dir/missing" # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= { echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5 echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} fi { echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5 echo $ECHO_N "checking for a thread-safe mkdir -p... $ECHO_C" >&6; } if test -z "$MKDIR_P"; then if test "${ac_cv_path_mkdir+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. test -d ./--version && rmdir ./--version MKDIR_P="$ac_install_sh -d" fi fi { echo "$as_me:$LINENO: result: $MKDIR_P" >&5 echo "${ECHO_T}$MKDIR_P" >&6; } mkdir_p="$MKDIR_P" case $mkdir_p in [\\/$]* | ?:[\\/]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_prog_AWK+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AWK="$ac_prog" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { echo "$as_me:$LINENO: result: $AWK" >&5 echo "${ECHO_T}$AWK" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi test -n "$AWK" && break done { echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 echo $ECHO_N "checking whether ${MAKE-make} sets \$(MAKE)... $ECHO_C" >&6; } set x ${MAKE-make}; ac_make=`echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then echo $ECHO_N "(cached) $ECHO_C" >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering...", which would confuse us. 
case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { echo "$as_me:$LINENO: result: yes" >&5 echo "${ECHO_T}yes" >&6; } SET_MAKE= else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then { { echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;} { (exit 1); exit 1; }; } fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='DrIFT' VERSION='2.2.3' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} install_sh=${install_sh-"\$(SHELL) $am_aux_dir/install-sh"} # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { echo "$as_me:$LINENO: result: $STRIP" >&5 echo "${ECHO_T}$STRIP" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... 
$ECHO_C" >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 echo "${ECHO_T}$ac_ct_STRIP" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" # We need awk for the "check" target. The system "awk" is bad on # some platforms. # Always define AMTAR for backward compatibility. AMTAR=${AMTAR-"${am_missing_run}tar"} am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' #AC_CANONICAL_HOST #AC_PROG_CC ACLOCAL="${ACLOCAL} -I ./ac-macros" # Check whether --with-hc was given. if test "${with_hc+set}" = set; then withval=$with_hc; HC=${withval} else for ac_prog in ghc do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_path_HC+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $HC in [\\/]* | ?:[\\/]*) ac_cv_path_HC="$HC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_HC="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi HC=$ac_cv_path_HC if test -n "$HC"; then { echo "$as_me:$LINENO: result: $HC" >&5 echo "${ECHO_T}$HC" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi test -n "$HC" && break done test -n "$HC" || HC=" { { echo "$as_me:$LINENO: error: Required \"hc\" program not found." >&5 echo "$as_me: error: Required \"hc\" program not found." >&2;} { (exit 1); exit 1; }; } " fi # Check whether --with-hcflags was given. if test "${with_hcflags+set}" = set; then withval=$with_hcflags; HCFLAGS=$withval fi # Find a good install program. We prefer a C program (faster), # so one script is as good as another. 
But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. { echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi done done ;; esac done IFS=$as_save_IFS fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { echo "$as_me:$LINENO: result: $INSTALL" >&5 echo "${ECHO_T}$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' for ac_prog in sh do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_path_SH+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $SH in [\\/]* | ?:[\\/]*) ac_cv_path_SH="$SH" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_SH="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi SH=$ac_cv_path_SH if test -n "$SH"; then { echo "$as_me:$LINENO: result: $SH" >&5 echo "${ECHO_T}$SH" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi test -n "$SH" && break done ac_config_files="$ac_config_files Makefile src/Makefile example/Makefile src/Version.hs DrIFT.spec drift-ghc" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && { echo "$as_me:$LINENO: updating cache $cache_file" >&5 echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. 
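# Illustrative note (the macro shown is an example, not read back from this
# package's confdefs.h): a definition such as
#   #define PACKAGE_NAME "DrIFT"
# is rewritten by the script below into the single DEFS word
#   -DPACKAGE_NAME=\"DrIFT\"
# i.e. each "#define NAME VALUE" line becomes "-DNAME=VALUE" with shell- and
# make-significant characters backslash-escaped.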
ac_script=' t clear :clear s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g t quote s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g t quote b any :quote s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g s/\[/\\&/g s/\]/\\&/g s/\$/$$/g H :any ${ g s/^\n// s/\n/ /g p } ' DEFS=`sed -n "$ac_script" confdefs.h` ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : ${CONFIG_STATUS=./config.status} ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 echo "$as_me: creating $CONFIG_STATUS" >&6;} cat >$CONFIG_STATUS <<_ACEOF #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) as_nl=' ' IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. 
for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. for as_var in \ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var fi done # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir fi echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 
2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 # Save the log message, to keep $[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by DrIFT $as_me 2.2.3, which was generated by GNU Autoconf 2.61. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF cat >>$CONFIG_STATUS <<_ACEOF # Files that config.status was made for. config_files="$ac_config_files" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF ac_cs_usage="\ \`$as_me' instantiates files from templates according to the current configuration. Usage: $0 [OPTIONS] [FILE]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_cs_version="\\ DrIFT config.status 2.2.3 configured by $0, generated by GNU Autoconf 2.61, with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" Copyright (C) 2006 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # If no file are specified by the user, then we need to provide default # value. By we need to know if files were specified by the user. ac_need_defaults=: while test $# != 0 do case $1 in --*=*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) echo "$ac_cs_version"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift CONFIG_FILES="$CONFIG_FILES $ac_optarg" ac_need_defaults=false;; --he | --h | --help | --hel | -h ) echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) { echo "$as_me: error: unrecognized option: $1 Try \`$0 --help' for more information." 
>&2 { (exit 1); exit 1; }; } ;; *) ac_config_targets="$ac_config_targets $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF if \$ac_cs_recheck; then echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 CONFIG_SHELL=$SHELL export CONFIG_SHELL exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "example/Makefile") CONFIG_FILES="$CONFIG_FILES example/Makefile" ;; "src/Version.hs") CONFIG_FILES="$CONFIG_FILES src/Version.hs" ;; "DrIFT.spec") CONFIG_FILES="$CONFIG_FILES DrIFT.spec" ;; "drift-ghc") CONFIG_FILES="$CONFIG_FILES drift-ghc" ;; *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 echo "$as_me: error: invalid argument: $ac_config_target" >&2;} { (exit 1); exit 1; }; };; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || { echo "$me: cannot create a temporary directory in ." >&2 { (exit 1); exit 1; } } # # Set up the sed scripts for CONFIG_FILES section. # # No need to generate the scripts if there are no CONFIG_FILES. 
# This happens for instance when ./config.status config.h if test -n "$CONFIG_FILES"; then _ACEOF ac_delim='%!_!# ' for ac_last_try in false false false false false :; do cat >conf$$subs.sed <<_ACEOF SHELL!$SHELL$ac_delim PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim PACKAGE_NAME!$PACKAGE_NAME$ac_delim PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim PACKAGE_STRING!$PACKAGE_STRING$ac_delim PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim exec_prefix!$exec_prefix$ac_delim prefix!$prefix$ac_delim program_transform_name!$program_transform_name$ac_delim bindir!$bindir$ac_delim sbindir!$sbindir$ac_delim libexecdir!$libexecdir$ac_delim datarootdir!$datarootdir$ac_delim datadir!$datadir$ac_delim sysconfdir!$sysconfdir$ac_delim sharedstatedir!$sharedstatedir$ac_delim localstatedir!$localstatedir$ac_delim includedir!$includedir$ac_delim oldincludedir!$oldincludedir$ac_delim docdir!$docdir$ac_delim infodir!$infodir$ac_delim htmldir!$htmldir$ac_delim dvidir!$dvidir$ac_delim pdfdir!$pdfdir$ac_delim psdir!$psdir$ac_delim libdir!$libdir$ac_delim localedir!$localedir$ac_delim mandir!$mandir$ac_delim DEFS!$DEFS$ac_delim ECHO_C!$ECHO_C$ac_delim ECHO_N!$ECHO_N$ac_delim ECHO_T!$ECHO_T$ac_delim LIBS!$LIBS$ac_delim build_alias!$build_alias$ac_delim host_alias!$host_alias$ac_delim target_alias!$target_alias$ac_delim INSTALL_PROGRAM!$INSTALL_PROGRAM$ac_delim INSTALL_SCRIPT!$INSTALL_SCRIPT$ac_delim INSTALL_DATA!$INSTALL_DATA$ac_delim am__isrc!$am__isrc$ac_delim CYGPATH_W!$CYGPATH_W$ac_delim PACKAGE!$PACKAGE$ac_delim VERSION!$VERSION$ac_delim ACLOCAL!$ACLOCAL$ac_delim AUTOCONF!$AUTOCONF$ac_delim AUTOMAKE!$AUTOMAKE$ac_delim AUTOHEADER!$AUTOHEADER$ac_delim MAKEINFO!$MAKEINFO$ac_delim install_sh!$install_sh$ac_delim STRIP!$STRIP$ac_delim INSTALL_STRIP_PROGRAM!$INSTALL_STRIP_PROGRAM$ac_delim mkdir_p!$mkdir_p$ac_delim AWK!$AWK$ac_delim SET_MAKE!$SET_MAKE$ac_delim am__leading_dot!$am__leading_dot$ac_delim AMTAR!$AMTAR$ac_delim am__tar!$am__tar$ac_delim am__untar!$am__untar$ac_delim HC!$HC$ac_delim SH!$SH$ac_delim HCFLAGS!$HCFLAGS$ac_delim LIBOBJS!$LIBOBJS$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 64; then break elif $ac_last_try; then { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` if test -n "$ac_eof"; then ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` ac_eof=`expr $ac_eof + 1` fi cat >>$CONFIG_STATUS <<_ACEOF cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof /@[a-zA-Z_][a-zA-Z_0-9]*@/!b end _ACEOF sed ' s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g s/^/s,@/; s/!/@,|#_!!_#|/ :n t n s/'"$ac_delim"'$/,g/; t s/$/\\/; p N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n ' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF :end s/|#_!!_#|//g CEOF$ac_eof _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/ s/:*\${srcdir}:*/:/ s/:*@srcdir@:*/:/ s/^\([^=]*=[ ]*\):*/\1/ s/:*$// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF fi # test -n "$CONFIG_FILES" for ac_tag in :F $CONFIG_FILES do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 echo "$as_me: error: Invalid tag $ac_tag." >&2;} { (exit 1); exit 1; }; };; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 echo "$as_me: error: cannot find input file: $ac_f" >&2;} { (exit 1); exit 1; }; };; esac ac_file_inputs="$ac_file_inputs $ac_f" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input="Generated from "`IFS=: echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure." if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { echo "$as_me:$LINENO: creating $ac_file" >&5 echo "$as_me: creating $ac_file" >&6;} fi case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin";; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir="$ac_dir" case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= case `sed -n '/datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p ' $ac_file_inputs` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF sed "$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s&@configure_input@&$configure_input&;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&2;} rm -f "$tmp/stdin" case $ac_file in -) cat "$tmp/out"; rm -f "$tmp/out";; *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;; esac ;; esac done # for ac_tag { (exit 0); exit 0; } _ACEOF chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. 
# Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || { (exit 1); exit 1; } fi
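# A minimal usage sketch (illustrative only; the compiler path and flag shown
# are assumptions, not values detected above): configuring DrIFT with an
# explicit Haskell compiler and later re-instantiating the generated files
# through config.status might look like
#
#   ./configure --with-hc=/usr/bin/ghc --with-hcflags="-O"
#   ./config.status              # regenerate Makefile, src/Version.hs, drift-ghc, ...
#   ./config.status --recheck    # re-run configure with the original arguments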